1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 #include <linux/stringify.h>
36 #define OpImplicit 1ull /* No generic decode */
37 #define OpReg 2ull /* Register */
38 #define OpMem 3ull /* Memory */
39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
40 #define OpDI 5ull /* ES:DI/EDI/RDI */
41 #define OpMem64 6ull /* Memory, 64-bit */
42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
43 #define OpDX 8ull /* DX register */
44 #define OpCL 9ull /* CL register (for shifts) */
45 #define OpImmByte 10ull /* 8-bit sign extended immediate */
46 #define OpOne 11ull /* Implied 1 */
47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
48 #define OpMem16 13ull /* Memory operand (16-bit). */
49 #define OpMem32 14ull /* Memory operand (32-bit). */
50 #define OpImmU 15ull /* Immediate operand, zero extended */
51 #define OpSI 16ull /* SI/ESI/RSI */
52 #define OpImmFAddr 17ull /* Immediate far address */
53 #define OpMemFAddr 18ull /* Far address in memory */
54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
55 #define OpES 20ull /* ES */
56 #define OpCS 21ull /* CS */
57 #define OpSS 22ull /* SS */
58 #define OpDS 23ull /* DS */
59 #define OpFS 24ull /* FS */
60 #define OpGS 25ull /* GS */
61 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
67 #define OpBits 5 /* Width of operand field */
68 #define OpMask ((1ull << OpBits) - 1)
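/*
 * Illustrative sketch (not part of the emulator): each Op* code above is
 * a 5-bit field value packed into the u64 opcode descriptor at
 * DstShift/SrcShift/Src2Shift, so decoding a field is a shift-and-mask:
 *
 *	unsigned dst_op  = (ctxt->d >> DstShift)  & OpMask;
 *	unsigned src_op  = (ctxt->d >> SrcShift)  & OpMask;
 *	unsigned src2_op = (ctxt->d >> Src2Shift) & OpMask;
 *
 * e.g. a table entry built as DstReg | SrcMem decodes back to OpReg and
 * OpMem.
 */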
71 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
75 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need not be handled.
79 /* Operand sizes: 8-bit operands or specified/overridden size. */
80 #define ByteOp (1<<0) /* 8-bit operands. */
81 /* Destination operand type. */
83 #define ImplicitOps (OpImplicit << DstShift)
84 #define DstReg (OpReg << DstShift)
85 #define DstMem (OpMem << DstShift)
86 #define DstAcc (OpAcc << DstShift)
87 #define DstDI (OpDI << DstShift)
88 #define DstMem64 (OpMem64 << DstShift)
89 #define DstImmUByte (OpImmUByte << DstShift)
90 #define DstDX (OpDX << DstShift)
91 #define DstAccLo (OpAccLo << DstShift)
92 #define DstMask (OpMask << DstShift)
93 /* Source operand type. */
95 #define SrcNone (OpNone << SrcShift)
96 #define SrcReg (OpReg << SrcShift)
97 #define SrcMem (OpMem << SrcShift)
98 #define SrcMem16 (OpMem16 << SrcShift)
99 #define SrcMem32 (OpMem32 << SrcShift)
100 #define SrcImm (OpImm << SrcShift)
101 #define SrcImmByte (OpImmByte << SrcShift)
102 #define SrcOne (OpOne << SrcShift)
103 #define SrcImmUByte (OpImmUByte << SrcShift)
104 #define SrcImmU (OpImmU << SrcShift)
105 #define SrcSI (OpSI << SrcShift)
106 #define SrcXLat (OpXLat << SrcShift)
107 #define SrcImmFAddr (OpImmFAddr << SrcShift)
108 #define SrcMemFAddr (OpMemFAddr << SrcShift)
109 #define SrcAcc (OpAcc << SrcShift)
110 #define SrcImmU16 (OpImmU16 << SrcShift)
111 #define SrcImm64 (OpImm64 << SrcShift)
112 #define SrcDX (OpDX << SrcShift)
113 #define SrcMem8 (OpMem8 << SrcShift)
114 #define SrcAccHi (OpAccHi << SrcShift)
115 #define SrcMask (OpMask << SrcShift)
116 #define BitOp (1<<11)
117 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
118 #define String (1<<13) /* String instruction (rep capable) */
119 #define Stack (1<<14) /* Stack instruction (push/pop) */
120 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
121 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
122 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
123 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
124 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
125 #define Escape (5<<15) /* Escape to coprocessor instruction */
126 #define Sse (1<<18) /* SSE Vector instruction */
127 /* Generic ModRM decode. */
128 #define ModRM (1<<19)
129 /* Destination is only written; never read. */
132 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
133 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
134 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
135 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
136 #define Undefined (1<<25) /* No Such Instruction */
137 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
138 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
140 #define PageTable (1 << 29) /* instruction used to write page table */
141 #define NotImpl (1 << 30) /* instruction is not implemented */
142 /* Source 2 operand type */
143 #define Src2Shift (31)
144 #define Src2None (OpNone << Src2Shift)
145 #define Src2Mem (OpMem << Src2Shift)
146 #define Src2CL (OpCL << Src2Shift)
147 #define Src2ImmByte (OpImmByte << Src2Shift)
148 #define Src2One (OpOne << Src2Shift)
149 #define Src2Imm (OpImm << Src2Shift)
150 #define Src2ES (OpES << Src2Shift)
151 #define Src2CS (OpCS << Src2Shift)
152 #define Src2SS (OpSS << Src2Shift)
153 #define Src2DS (OpDS << Src2Shift)
154 #define Src2FS (OpFS << Src2Shift)
155 #define Src2GS (OpGS << Src2Shift)
156 #define Src2Mask (OpMask << Src2Shift)
157 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
158 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
159 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
160 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
161 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
162 #define NoWrite ((u64)1 << 45) /* No writeback */
163 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
164 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
165 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
166 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
167 #define NoBigReal ((u64)1 << 50) /* No big real mode */
168 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
170 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
172 #define X2(x...) x, x
173 #define X3(x...) X2(x), x
174 #define X4(x...) X2(x), X2(x)
175 #define X5(x...) X4(x), x
176 #define X6(x...) X4(x), X2(x)
177 #define X7(x...) X4(x), X3(x)
178 #define X8(x...) X4(x), X4(x)
179 #define X16(x...) X8(x), X8(x)
181 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
182 #define FASTOP_SIZE 8
185 * fastop functions have a special calling convention:
190 * flags: rflags (in/out)
191 * ex: rsi (in:fastop pointer, out:zero if exception)
193 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
194 * different operand sizes can be reached by calculation, rather than a jump
195 * table (which would be bigger than the code).
197 * fastop functions are declared as taking a never-defined fastop parameter,
198 * so they can't be called from C directly.
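/*
 * Sketch of the size-indexed dispatch (the real logic lives in fastop(),
 * declared below): since every size variant is exactly FASTOP_SIZE bytes,
 * the variant for a given operand size sits at a fixed offset from the
 * em_##op entry point (em_add assumed generated by FASTOP2(add)):
 *
 *	fop = em_add;	(byte variant)
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * so operand sizes 2/4/8 select the word/long/quad variants.
 */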
207 int (*execute)(struct x86_emulate_ctxt *ctxt);
208 const struct opcode *group;
209 const struct group_dual *gdual;
210 const struct gprefix *gprefix;
211 const struct escape *esc;
212 void (*fastop)(struct fastop *fake);
214 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
218 struct opcode mod012[8];
219 struct opcode mod3[8];
223 struct opcode pfx_no;
224 struct opcode pfx_66;
225 struct opcode pfx_f2;
226 struct opcode pfx_f3;
231 struct opcode high[64];
234 /* EFLAGS bit definitions. */
235 #define EFLG_ID (1<<21)
236 #define EFLG_VIP (1<<20)
237 #define EFLG_VIF (1<<19)
238 #define EFLG_AC (1<<18)
239 #define EFLG_VM (1<<17)
240 #define EFLG_RF (1<<16)
241 #define EFLG_IOPL (3<<12)
242 #define EFLG_NT (1<<14)
243 #define EFLG_OF (1<<11)
244 #define EFLG_DF (1<<10)
245 #define EFLG_IF (1<<9)
246 #define EFLG_TF (1<<8)
247 #define EFLG_SF (1<<7)
248 #define EFLG_ZF (1<<6)
249 #define EFLG_AF (1<<4)
250 #define EFLG_PF (1<<2)
251 #define EFLG_CF (1<<0)
253 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
254 #define EFLG_RESERVED_ONE_MASK 2
256 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
258 if (!(ctxt->regs_valid & (1 << nr))) {
259 ctxt->regs_valid |= 1 << nr;
260 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
262 return ctxt->_regs[nr];
265 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
267 ctxt->regs_valid |= 1 << nr;
268 ctxt->regs_dirty |= 1 << nr;
269 return &ctxt->_regs[nr];
272 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
275 return reg_write(ctxt, nr);
278 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
282 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
283 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
286 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
288 ctxt->regs_dirty = 0;
289 ctxt->regs_valid = 0;
293 * These EFLAGS bits are restored from the saved value during emulation,
294 * and any changes are written back to the saved value afterwards.
296 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
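/*
 * Worked example: if the guest's saved EFLAGS has IF set and an emulated
 * ADD produces CF|ZF, the writeback amounts to
 *
 *	eflags = (eflags & ~EFLAGS_MASK) | (result_flags & EFLAGS_MASK);
 *
 * which replaces the arithmetic flags while preserving IF and the other
 * bits outside EFLAGS_MASK.
 */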
304 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
306 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
307 #define FOP_RET "ret \n\t"
309 #define FOP_START(op) \
310 extern void em_##op(struct fastop *fake); \
311 asm(".pushsection .text, \"ax\" \n\t" \
312 ".global em_" #op " \n\t" \
319 #define FOPNOP() FOP_ALIGN FOP_RET
321 #define FOP1E(op, dst) \
322 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
324 #define FOP1EEX(op, dst) \
325 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
327 #define FASTOP1(op) \
332 ON64(FOP1E(op##q, rax)) \
335 /* 1-operand, using src2 (for MUL/DIV r/m) */
336 #define FASTOP1SRC2(op, name) \
341 ON64(FOP1E(op, rcx)) \
344 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
345 #define FASTOP1SRC2EX(op, name) \
350 ON64(FOP1EEX(op, rcx)) \
353 #define FOP2E(op, dst, src) \
354 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
356 #define FASTOP2(op) \
358 FOP2E(op##b, al, dl) \
359 FOP2E(op##w, ax, dx) \
360 FOP2E(op##l, eax, edx) \
361 ON64(FOP2E(op##q, rax, rdx)) \
364 /* 2 operand, word only */
365 #define FASTOP2W(op) \
368 FOP2E(op##w, ax, dx) \
369 FOP2E(op##l, eax, edx) \
370 ON64(FOP2E(op##q, rax, rdx)) \
373 /* 2 operand, src is CL */
374 #define FASTOP2CL(op) \
376 FOP2E(op##b, al, cl) \
377 FOP2E(op##w, ax, cl) \
378 FOP2E(op##l, eax, cl) \
379 ON64(FOP2E(op##q, rax, cl)) \
382 #define FOP3E(op, dst, src, src2) \
383 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
385 /* 3-operand, word-only, src2=cl */
386 #define FASTOP3WCL(op) \
389 FOP3E(op##w, ax, dx, cl) \
390 FOP3E(op##l, eax, edx, cl) \
391 ON64(FOP3E(op##q, rax, rdx, cl)) \
394 /* Special case for SETcc - 1 instruction per cc */
395 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
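/*
 * Each setcc/ret pair is 4 bytes (3-byte setcc + 1-byte ret, padded by
 * .align 4), so test_cc() below can locate the handler for condition
 * code cc at em_setcc + 4 * cc.
 */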
397 asm(".global kvm_fastop_exception \n"
398 "kvm_fastop_exception: xor %esi, %esi; ret");
419 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
422 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
423 enum x86_intercept intercept,
424 enum x86_intercept_stage stage)
426 struct x86_instruction_info info = {
427 .intercept = intercept,
428 .rep_prefix = ctxt->rep_prefix,
429 .modrm_mod = ctxt->modrm_mod,
430 .modrm_reg = ctxt->modrm_reg,
431 .modrm_rm = ctxt->modrm_rm,
432 .src_val = ctxt->src.val64,
433 .dst_val = ctxt->dst.val64,
434 .src_bytes = ctxt->src.bytes,
435 .dst_bytes = ctxt->dst.bytes,
436 .ad_bytes = ctxt->ad_bytes,
437 .next_rip = ctxt->eip,
440 return ctxt->ops->intercept(ctxt, &info, stage);
443 static void assign_masked(ulong *dest, ulong src, ulong mask)
445 *dest = (*dest & ~mask) | (src & mask);
448 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
450 return (1UL << (ctxt->ad_bytes << 3)) - 1;
453 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
456 struct desc_struct ss;
458 if (ctxt->mode == X86EMUL_MODE_PROT64)
460 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
461 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
464 static int stack_size(struct x86_emulate_ctxt *ctxt)
466 return (__fls(stack_mask(ctxt)) + 1) >> 3;
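/*
 * Worked example: a 32-bit stack segment (ss.d == 1) gives stack_mask()
 * == 0xffffffff, so stack_size() == (__fls(0xffffffff) + 1) >> 3 == 4;
 * ss.d == 0 gives 0xffff and a 2-byte stack.
 */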
469 /* Access/update address held in a register, based on addressing mode. */
470 static inline unsigned long
471 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
473 if (ctxt->ad_bytes == sizeof(unsigned long))
476 return reg & ad_mask(ctxt);
479 static inline unsigned long
480 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
482 return address_mask(ctxt, reg);
485 static void masked_increment(ulong *reg, ulong mask, int inc)
487 assign_masked(reg, *reg + inc, mask);
491 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
495 if (ctxt->ad_bytes == sizeof(unsigned long))
498 mask = ad_mask(ctxt);
499 masked_increment(reg, mask, inc);
502 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
504 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
507 static u32 desc_limit_scaled(struct desc_struct *desc)
509 u32 limit = get_desc_limit(desc);
511 return desc->g ? (limit << 12) | 0xfff : limit;
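/*
 * Worked example: limit 0xfffff with G=1 scales to (0xfffff << 12) |
 * 0xfff == 0xffffffff (a 4GiB segment); with G=0 the raw byte-granular
 * limit is returned unchanged.
 */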
514 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
516 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
519 return ctxt->ops->get_cached_segment_base(ctxt, seg);
522 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
523 u32 error, bool valid)
526 ctxt->exception.vector = vec;
527 ctxt->exception.error_code = error;
528 ctxt->exception.error_code_valid = valid;
529 return X86EMUL_PROPAGATE_FAULT;
532 static int emulate_db(struct x86_emulate_ctxt *ctxt)
534 return emulate_exception(ctxt, DB_VECTOR, 0, false);
537 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
539 return emulate_exception(ctxt, GP_VECTOR, err, true);
542 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
544 return emulate_exception(ctxt, SS_VECTOR, err, true);
547 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
549 return emulate_exception(ctxt, UD_VECTOR, 0, false);
552 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
554 return emulate_exception(ctxt, TS_VECTOR, err, true);
557 static int emulate_de(struct x86_emulate_ctxt *ctxt)
559 return emulate_exception(ctxt, DE_VECTOR, 0, false);
562 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
564 return emulate_exception(ctxt, NM_VECTOR, 0, false);
567 static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
570 switch (ctxt->op_bytes) {
572 ctxt->_eip = (u16)dst;
575 ctxt->_eip = (u32)dst;
579 if ((cs_l && is_noncanonical_address(dst)) ||
580 (!cs_l && (dst >> 32) != 0))
581 return emulate_gp(ctxt, 0);
586 WARN(1, "unsupported eip assignment size\n");
588 return X86EMUL_CONTINUE;
591 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
593 return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
596 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
598 return assign_eip_near(ctxt, ctxt->_eip + rel);
601 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
604 struct desc_struct desc;
606 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
610 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
615 struct desc_struct desc;
617 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
618 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
622 * x86 defines three classes of vector instructions: explicitly
623 * aligned, explicitly unaligned, and the rest, which change behaviour
624 * depending on whether they're AVX encoded or not.
626 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
627 * subject to the same check.
629 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
631 if (likely(size < 16))
634 if (ctxt->d & Aligned)
636 else if (ctxt->d & Unaligned)
638 else if (ctxt->d & Avx)
644 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
645 struct segmented_address addr,
646 unsigned *max_size, unsigned size,
647 bool write, bool fetch,
650 struct desc_struct desc;
657 la = seg_base(ctxt, addr.seg) + addr.ea;
659 switch (ctxt->mode) {
660 case X86EMUL_MODE_PROT64:
661 if (is_noncanonical_address(la))
662 return emulate_gp(ctxt, 0);
664 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
665 if (size > *max_size)
669 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
673 /* code segment in protected mode or read-only data segment */
674 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
675 || !(desc.type & 2)) && write)
677 /* unreadable code segment */
678 if (!fetch && (desc.type & 8) && !(desc.type & 2))
680 lim = desc_limit_scaled(&desc);
681 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
682 (ctxt->d & NoBigReal)) {
683 /* la is between zero and 0xffff */
686 *max_size = 0x10000 - la;
687 } else if ((desc.type & 8) || !(desc.type & 4)) {
688 /* expand-up segment */
691 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
693 /* expand-down segment */
696 lim = desc.d ? 0xffffffff : 0xffff;
699 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
701 if (size > *max_size)
703 cpl = ctxt->ops->cpl(ctxt);
704 if (!(desc.type & 8)) {
708 } else if ((desc.type & 8) && !(desc.type & 4)) {
709 /* nonconforming code segment */
712 } else if ((desc.type & 8) && (desc.type & 4)) {
713 /* conforming code segment */
719 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
721 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
722 return emulate_gp(ctxt, 0);
724 return X86EMUL_CONTINUE;
726 if (addr.seg == VCPU_SREG_SS)
727 return emulate_ss(ctxt, 0);
729 return emulate_gp(ctxt, 0);
732 static int linearize(struct x86_emulate_ctxt *ctxt,
733 struct segmented_address addr,
734 unsigned size, bool write,
738 return __linearize(ctxt, addr, &max_size, size, write, false, linear);
742 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
743 struct segmented_address addr,
750 rc = linearize(ctxt, addr, size, false, &linear);
751 if (rc != X86EMUL_CONTINUE)
753 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
757 * Prefetch the remaining bytes of the instruction, without crossing a
758 * page boundary, if they are not in the fetch_cache yet.
760 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
763 unsigned size, max_size;
764 unsigned long linear;
765 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
766 struct segmented_address addr = { .seg = VCPU_SREG_CS,
767 .ea = ctxt->eip + cur_size };
770 * We do not know exactly how many bytes will be needed, and
771 * __linearize is expensive, so fetch as much as possible. We
772 * just have to avoid going beyond the 15 byte limit, the end
773 * of the segment, or the end of the page.
775 * __linearize is called with size 0 so that it does not do any
776 * boundary check itself. Instead, we use max_size to check against op_size.
779 rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
780 if (unlikely(rc != X86EMUL_CONTINUE))
783 size = min_t(unsigned, 15UL ^ cur_size, max_size);
784 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
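/*
 * Note: 15UL ^ cur_size equals 15 - cur_size here, because cur_size
 * never exceeds the 15-byte instruction limit and 15 is all-ones in
 * its low four bits.
 */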
787 * One instruction can only straddle two pages,
788 * and one page has been loaded at the beginning of
789 * x86_decode_insn. So, if we still do not have enough
790 * bytes, we must have hit the 15-byte instruction limit.
792 if (unlikely(size < op_size))
793 return emulate_gp(ctxt, 0);
795 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
796 size, &ctxt->exception);
797 if (unlikely(rc != X86EMUL_CONTINUE))
799 ctxt->fetch.end += size;
800 return X86EMUL_CONTINUE;
803 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
806 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
808 if (unlikely(done_size < size))
809 return __do_insn_fetch_bytes(ctxt, size - done_size);
811 return X86EMUL_CONTINUE;
814 /* Fetch next part of the instruction being emulated. */
815 #define insn_fetch(_type, _ctxt) \
818 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
819 if (rc != X86EMUL_CONTINUE) \
821 ctxt->_eip += sizeof(_type); \
822 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
823 ctxt->fetch.ptr += sizeof(_type); \
827 #define insn_fetch_arr(_arr, _size, _ctxt) \
829 rc = do_insn_fetch_bytes(_ctxt, _size); \
830 if (rc != X86EMUL_CONTINUE) \
832 ctxt->_eip += (_size); \
833 memcpy(_arr, ctxt->fetch.ptr, _size); \
834 ctxt->fetch.ptr += (_size); \
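/*
 * Usage (illustrative): insn_fetch() is a statement expression, so the
 * decoder can write e.g.
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *
 * and a failed fetch bails out through the rc check with _eip and
 * fetch.ptr left untouched.
 */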
838 * Given the 'reg' portion of a ModRM byte, and a register block, return a
839 * pointer into the block that addresses the relevant register.
840 * The high-byte registers AH, CH, DH and BH are decoded only for byte operands when no REX prefix is present.
842 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
846 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
848 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
849 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
851 p = reg_rmw(ctxt, modrm_reg);
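/*
 * Worked example: with no REX prefix, a byte operand with modrm_reg == 7
 * selects BH, i.e. byte 1 of the RBX slot (modrm_reg & 3 == VCPU_REGS_RBX);
 * with any REX prefix the same encoding selects DIL instead.
 */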
855 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
856 struct segmented_address addr,
857 u16 *size, unsigned long *address, int op_bytes)
864 rc = segmented_read_std(ctxt, addr, size, 2);
865 if (rc != X86EMUL_CONTINUE)
868 rc = segmented_read_std(ctxt, addr, address, op_bytes);
882 FASTOP1SRC2(mul, mul_ex);
883 FASTOP1SRC2(imul, imul_ex);
884 FASTOP1SRC2EX(div, div_ex);
885 FASTOP1SRC2EX(idiv, idiv_ex);
914 static u8 test_cc(unsigned int condition, unsigned long flags)
917 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
919 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
920 asm("push %[flags]; popf; call *%[fastop]"
921 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
925 static void fetch_register_operand(struct operand *op)
929 op->val = *(u8 *)op->addr.reg;
932 op->val = *(u16 *)op->addr.reg;
935 op->val = *(u32 *)op->addr.reg;
938 op->val = *(u64 *)op->addr.reg;
943 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
945 ctxt->ops->get_fpu(ctxt);
947 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
948 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
949 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
950 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
951 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
952 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
953 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
954 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
956 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
957 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
958 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
959 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
960 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
961 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
962 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
963 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
967 ctxt->ops->put_fpu(ctxt);
970 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
973 ctxt->ops->get_fpu(ctxt);
975 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
976 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
977 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
978 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
979 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
980 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
981 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
982 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
984 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
985 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
986 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
987 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
988 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
989 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
990 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
991 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
995 ctxt->ops->put_fpu(ctxt);
998 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1000 ctxt->ops->get_fpu(ctxt);
1002 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1003 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1004 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1005 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1006 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1007 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1008 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1009 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1012 ctxt->ops->put_fpu(ctxt);
1015 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1017 ctxt->ops->get_fpu(ctxt);
1019 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1020 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1021 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1022 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1023 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1024 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1025 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1026 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1029 ctxt->ops->put_fpu(ctxt);
1032 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1034 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1035 return emulate_nm(ctxt);
1037 ctxt->ops->get_fpu(ctxt);
1038 asm volatile("fninit");
1039 ctxt->ops->put_fpu(ctxt);
1040 return X86EMUL_CONTINUE;
1043 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1047 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1048 return emulate_nm(ctxt);
1050 ctxt->ops->get_fpu(ctxt);
1051 asm volatile("fnstcw %0": "+m"(fcw));
1052 ctxt->ops->put_fpu(ctxt);
1054 /* force 2 byte destination */
1055 ctxt->dst.bytes = 2;
1056 ctxt->dst.val = fcw;
1058 return X86EMUL_CONTINUE;
1061 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1065 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1066 return emulate_nm(ctxt);
1068 ctxt->ops->get_fpu(ctxt);
1069 asm volatile("fnstsw %0": "+m"(fsw));
1070 ctxt->ops->put_fpu(ctxt);
1072 /* force 2 byte destination */
1073 ctxt->dst.bytes = 2;
1074 ctxt->dst.val = fsw;
1076 return X86EMUL_CONTINUE;
1079 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1082 unsigned reg = ctxt->modrm_reg;
1084 if (!(ctxt->d & ModRM))
1085 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1087 if (ctxt->d & Sse) {
1091 read_sse_reg(ctxt, &op->vec_val, reg);
1094 if (ctxt->d & Mmx) {
1103 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1104 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1106 fetch_register_operand(op);
1107 op->orig_val = op->val;
1110 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1112 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1113 ctxt->modrm_seg = VCPU_SREG_SS;
1116 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1120 int index_reg, base_reg, scale;
1121 int rc = X86EMUL_CONTINUE;
1124 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1125 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1126 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1128 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1129 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1130 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1131 ctxt->modrm_seg = VCPU_SREG_DS;
1133 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1135 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1136 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1138 if (ctxt->d & Sse) {
1141 op->addr.xmm = ctxt->modrm_rm;
1142 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1145 if (ctxt->d & Mmx) {
1148 op->addr.mm = ctxt->modrm_rm & 7;
1151 fetch_register_operand(op);
1157 if (ctxt->ad_bytes == 2) {
1158 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1159 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1160 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1161 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1163 /* 16-bit ModR/M decode. */
1164 switch (ctxt->modrm_mod) {
1166 if (ctxt->modrm_rm == 6)
1167 modrm_ea += insn_fetch(u16, ctxt);
1170 modrm_ea += insn_fetch(s8, ctxt);
1173 modrm_ea += insn_fetch(u16, ctxt);
1176 switch (ctxt->modrm_rm) {
1178 modrm_ea += bx + si;
1181 modrm_ea += bx + di;
1184 modrm_ea += bp + si;
1187 modrm_ea += bp + di;
1196 if (ctxt->modrm_mod != 0)
1203 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1204 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1205 ctxt->modrm_seg = VCPU_SREG_SS;
1206 modrm_ea = (u16)modrm_ea;
1208 /* 32/64-bit ModR/M decode. */
1209 if ((ctxt->modrm_rm & 7) == 4) {
1210 sib = insn_fetch(u8, ctxt);
1211 index_reg |= (sib >> 3) & 7;
1212 base_reg |= sib & 7;
1215 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1216 modrm_ea += insn_fetch(s32, ctxt);
1218 modrm_ea += reg_read(ctxt, base_reg);
1219 adjust_modrm_seg(ctxt, base_reg);
1222 modrm_ea += reg_read(ctxt, index_reg) << scale;
1223 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1224 if (ctxt->mode == X86EMUL_MODE_PROT64)
1225 ctxt->rip_relative = 1;
1227 base_reg = ctxt->modrm_rm;
1228 modrm_ea += reg_read(ctxt, base_reg);
1229 adjust_modrm_seg(ctxt, base_reg);
1231 switch (ctxt->modrm_mod) {
1233 if (ctxt->modrm_rm == 5)
1234 modrm_ea += insn_fetch(s32, ctxt);
1237 modrm_ea += insn_fetch(s8, ctxt);
1240 modrm_ea += insn_fetch(s32, ctxt);
1244 op->addr.mem.ea = modrm_ea;
1245 if (ctxt->ad_bytes != 8)
1246 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
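/*
 * Worked examples (illustrative): with 16-bit addressing, ModRM 0x42
 * (mod == 1, rm == 2) decodes to BP + SI + disp8 with an implicit SS
 * segment; with 32/64-bit addressing, rm == 4 pulls in a SIB byte, and
 * mod == 0 with rm == 5 is a disp32 (RIP-relative in 64-bit mode).
 */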
1252 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1255 int rc = X86EMUL_CONTINUE;
1258 switch (ctxt->ad_bytes) {
1260 op->addr.mem.ea = insn_fetch(u16, ctxt);
1263 op->addr.mem.ea = insn_fetch(u32, ctxt);
1266 op->addr.mem.ea = insn_fetch(u64, ctxt);
1273 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1277 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1278 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1280 if (ctxt->src.bytes == 2)
1281 sv = (s16)ctxt->src.val & (s16)mask;
1282 else if (ctxt->src.bytes == 4)
1283 sv = (s32)ctxt->src.val & (s32)mask;
1285 sv = (s64)ctxt->src.val & (s64)mask;
1287 ctxt->dst.addr.mem.ea += (sv >> 3);
1290 /* only subword offset */
1291 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
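/*
 * Worked example: a bit test with source offset 70 on a 2-byte memory
 * destination: mask == ~15, sv == 70 & ~15 == 64, so the effective
 * address advances by 64 >> 3 == 8 bytes and the remaining in-word
 * offset is 70 & 15 == 6.
 */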
1294 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1295 unsigned long addr, void *dest, unsigned size)
1298 struct read_cache *mc = &ctxt->mem_read;
1300 if (mc->pos < mc->end)
1303 WARN_ON((mc->end + size) >= sizeof(mc->data));
1305 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1307 if (rc != X86EMUL_CONTINUE)
1313 memcpy(dest, mc->data + mc->pos, size);
1315 return X86EMUL_CONTINUE;
1318 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1319 struct segmented_address addr,
1326 rc = linearize(ctxt, addr, size, false, &linear);
1327 if (rc != X86EMUL_CONTINUE)
1329 return read_emulated(ctxt, linear, data, size);
1332 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1333 struct segmented_address addr,
1340 rc = linearize(ctxt, addr, size, true, &linear);
1341 if (rc != X86EMUL_CONTINUE)
1343 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1347 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1348 struct segmented_address addr,
1349 const void *orig_data, const void *data,
1355 rc = linearize(ctxt, addr, size, true, &linear);
1356 if (rc != X86EMUL_CONTINUE)
1358 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1359 size, &ctxt->exception);
1362 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1363 unsigned int size, unsigned short port,
1366 struct read_cache *rc = &ctxt->io_read;
1368 if (rc->pos == rc->end) { /* refill pio read ahead */
1369 unsigned int in_page, n;
1370 unsigned int count = ctxt->rep_prefix ?
1371 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1372 in_page = (ctxt->eflags & EFLG_DF) ?
1373 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1374 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1375 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1378 rc->pos = rc->end = 0;
1379 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1384 if (ctxt->rep_prefix && (ctxt->d & String) &&
1385 !(ctxt->eflags & EFLG_DF)) {
1386 ctxt->dst.data = rc->data + rc->pos;
1387 ctxt->dst.type = OP_MEM_STR;
1388 ctxt->dst.count = (rc->end - rc->pos) / size;
1391 memcpy(dest, rc->data + rc->pos, size);
1397 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1398 u16 index, struct desc_struct *desc)
1403 ctxt->ops->get_idt(ctxt, &dt);
1405 if (dt.size < index * 8 + 7)
1406 return emulate_gp(ctxt, index << 3 | 0x2);
1408 addr = dt.address + index * 8;
1409 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1413 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1414 u16 selector, struct desc_ptr *dt)
1416 const struct x86_emulate_ops *ops = ctxt->ops;
1419 if (selector & 1 << 2) {
1420 struct desc_struct desc;
1423 memset(dt, 0, sizeof *dt);
1424 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1428 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1429 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1431 ops->get_gdt(ctxt, dt);
1434 /* allowed just for 8-byte segment descriptors */
1435 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1436 u16 selector, struct desc_struct *desc,
1440 u16 index = selector >> 3;
1443 get_descriptor_table_ptr(ctxt, selector, &dt);
1445 if (dt.size < index * 8 + 7)
1446 return emulate_gp(ctxt, selector & 0xfffc);
1448 *desc_addr_p = addr = dt.address + index * 8;
1449 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1453 /* allowed just for 8-byte segment descriptors */
1454 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1455 u16 selector, struct desc_struct *desc)
1458 u16 index = selector >> 3;
1461 get_descriptor_table_ptr(ctxt, selector, &dt);
1463 if (dt.size < index * 8 + 7)
1464 return emulate_gp(ctxt, selector & 0xfffc);
1466 addr = dt.address + index * 8;
1467 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1471 /* Does not support long mode */
1472 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1473 u16 selector, int seg, u8 cpl,
1474 bool in_task_switch,
1475 struct desc_struct *desc)
1477 struct desc_struct seg_desc, old_desc;
1479 unsigned err_vec = GP_VECTOR;
1481 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1487 memset(&seg_desc, 0, sizeof seg_desc);
1489 if (ctxt->mode == X86EMUL_MODE_REAL) {
1490 /* set real mode segment descriptor (keep limit etc. for unreal mode) */
1492 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1493 set_desc_base(&seg_desc, selector << 4);
1495 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1496 /* VM86 needs a clean new segment descriptor */
1497 set_desc_base(&seg_desc, selector << 4);
1498 set_desc_limit(&seg_desc, 0xffff);
1508 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1509 if ((seg == VCPU_SREG_CS
1510 || (seg == VCPU_SREG_SS
1511 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1512 || seg == VCPU_SREG_TR)
1516 /* TR should be in GDT only */
1517 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1520 if (null_selector) /* for NULL selector skip all following checks */
1523 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1524 if (ret != X86EMUL_CONTINUE)
1527 err_code = selector & 0xfffc;
1528 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
1530 /* can't load system descriptor into segment selector */
1531 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1535 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1544 * segment is not a writable data segment, or the segment
1545 * selector's RPL != CPL, or the descriptor's DPL != CPL
1547 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1551 if (!(seg_desc.type & 8))
1554 if (seg_desc.type & 4) {
1560 if (rpl > cpl || dpl != cpl)
1563 /* in long-mode d/b must be clear if l is set */
1564 if (seg_desc.d && seg_desc.l) {
1567 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1568 if (efer & EFER_LMA)
1572 /* CS(RPL) <- CPL */
1573 selector = (selector & 0xfffc) | cpl;
1576 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1578 old_desc = seg_desc;
1579 seg_desc.type |= 2; /* busy */
1580 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1581 sizeof(seg_desc), &ctxt->exception);
1582 if (ret != X86EMUL_CONTINUE)
1585 case VCPU_SREG_LDTR:
1586 if (seg_desc.s || seg_desc.type != 2)
1589 default: /* DS, ES, FS, or GS */
1591 * segment is not a data or readable code segment or
1592 * ((segment is a data or nonconforming code segment)
1593 * and (both RPL and CPL > DPL))
1595 if ((seg_desc.type & 0xa) == 0x8 ||
1596 (((seg_desc.type & 0xc) != 0xc) &&
1597 (rpl > dpl && cpl > dpl)))
1603 /* mark segment as accessed */
1605 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1606 if (ret != X86EMUL_CONTINUE)
1608 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1609 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1610 sizeof(base3), &ctxt->exception);
1611 if (ret != X86EMUL_CONTINUE)
1615 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1618 return X86EMUL_CONTINUE;
1620 return emulate_exception(ctxt, err_vec, err_code, true);
1623 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1624 u16 selector, int seg)
1626 u8 cpl = ctxt->ops->cpl(ctxt);
1627 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
1630 static void write_register_operand(struct operand *op)
1632 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1633 switch (op->bytes) {
1635 *(u8 *)op->addr.reg = (u8)op->val;
1638 *(u16 *)op->addr.reg = (u16)op->val;
1641 *op->addr.reg = (u32)op->val;
1642 break; /* 64b: zero-extend */
1644 *op->addr.reg = op->val;
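/*
 * Worked example: writing 0x1122334455667788 with op->bytes == 4 stores
 * 0x55667788 and clears the upper 32 bits of the register, matching
 * hardware; 1- and 2-byte writes leave the upper bytes intact.
 */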
1649 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1653 write_register_operand(op);
1656 if (ctxt->lock_prefix)
1657 return segmented_cmpxchg(ctxt,
1663 return segmented_write(ctxt,
1669 return segmented_write(ctxt,
1672 op->bytes * op->count);
1675 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1678 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1686 return X86EMUL_CONTINUE;
1689 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1691 struct segmented_address addr;
1693 rsp_increment(ctxt, -bytes);
1694 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1695 addr.seg = VCPU_SREG_SS;
1697 return segmented_write(ctxt, addr, data, bytes);
1700 static int em_push(struct x86_emulate_ctxt *ctxt)
1702 /* Disable writeback. */
1703 ctxt->dst.type = OP_NONE;
1704 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1707 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1708 void *dest, int len)
1711 struct segmented_address addr;
1713 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1714 addr.seg = VCPU_SREG_SS;
1715 rc = segmented_read(ctxt, addr, dest, len);
1716 if (rc != X86EMUL_CONTINUE)
1719 rsp_increment(ctxt, len);
1723 static int em_pop(struct x86_emulate_ctxt *ctxt)
1725 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1728 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1729 void *dest, int len)
1732 unsigned long val, change_mask;
1733 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1734 int cpl = ctxt->ops->cpl(ctxt);
1736 rc = emulate_pop(ctxt, &val, len);
1737 if (rc != X86EMUL_CONTINUE)
1740 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1741 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
1743 switch(ctxt->mode) {
1744 case X86EMUL_MODE_PROT64:
1745 case X86EMUL_MODE_PROT32:
1746 case X86EMUL_MODE_PROT16:
1748 change_mask |= EFLG_IOPL;
1750 change_mask |= EFLG_IF;
1752 case X86EMUL_MODE_VM86:
1754 return emulate_gp(ctxt, 0);
1755 change_mask |= EFLG_IF;
1757 default: /* real mode */
1758 change_mask |= (EFLG_IOPL | EFLG_IF);
1762 *(unsigned long *)dest =
1763 (ctxt->eflags & ~change_mask) | (val & change_mask);
1768 static int em_popf(struct x86_emulate_ctxt *ctxt)
1770 ctxt->dst.type = OP_REG;
1771 ctxt->dst.addr.reg = &ctxt->eflags;
1772 ctxt->dst.bytes = ctxt->op_bytes;
1773 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1776 static int em_enter(struct x86_emulate_ctxt *ctxt)
1779 unsigned frame_size = ctxt->src.val;
1780 unsigned nesting_level = ctxt->src2.val & 31;
1784 return X86EMUL_UNHANDLEABLE;
1786 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1787 rc = push(ctxt, &rbp, stack_size(ctxt));
1788 if (rc != X86EMUL_CONTINUE)
1790 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1792 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1793 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1795 return X86EMUL_CONTINUE;
1798 static int em_leave(struct x86_emulate_ctxt *ctxt)
1800 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1802 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1805 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1807 int seg = ctxt->src2.val;
1809 ctxt->src.val = get_segment_selector(ctxt, seg);
1811 return em_push(ctxt);
1814 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1816 int seg = ctxt->src2.val;
1817 unsigned long selector;
1820 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1821 if (rc != X86EMUL_CONTINUE)
1824 if (ctxt->modrm_reg == VCPU_SREG_SS)
1825 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1827 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1831 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1833 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1834 int rc = X86EMUL_CONTINUE;
1835 int reg = VCPU_REGS_RAX;
1837 while (reg <= VCPU_REGS_RDI) {
1838 (reg == VCPU_REGS_RSP) ?
1839 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1842 if (rc != X86EMUL_CONTINUE)
1851 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1853 ctxt->src.val = (unsigned long)ctxt->eflags;
1854 return em_push(ctxt);
1857 static int em_popa(struct x86_emulate_ctxt *ctxt)
1859 int rc = X86EMUL_CONTINUE;
1860 int reg = VCPU_REGS_RDI;
1862 while (reg >= VCPU_REGS_RAX) {
1863 if (reg == VCPU_REGS_RSP) {
1864 rsp_increment(ctxt, ctxt->op_bytes);
1868 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1869 if (rc != X86EMUL_CONTINUE)
1876 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1878 const struct x86_emulate_ops *ops = ctxt->ops;
1885 /* TODO: Add limit checks */
1886 ctxt->src.val = ctxt->eflags;
1888 if (rc != X86EMUL_CONTINUE)
1891 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1893 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1895 if (rc != X86EMUL_CONTINUE)
1898 ctxt->src.val = ctxt->_eip;
1900 if (rc != X86EMUL_CONTINUE)
1903 ops->get_idt(ctxt, &dt);
1905 eip_addr = dt.address + (irq << 2);
1906 cs_addr = dt.address + (irq << 2) + 2;
1908 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1909 if (rc != X86EMUL_CONTINUE)
1912 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1913 if (rc != X86EMUL_CONTINUE)
1916 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1917 if (rc != X86EMUL_CONTINUE)
1925 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1929 invalidate_registers(ctxt);
1930 rc = __emulate_int_real(ctxt, irq);
1931 if (rc == X86EMUL_CONTINUE)
1932 writeback_registers(ctxt);
1936 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1938 switch(ctxt->mode) {
1939 case X86EMUL_MODE_REAL:
1940 return __emulate_int_real(ctxt, irq);
1941 case X86EMUL_MODE_VM86:
1942 case X86EMUL_MODE_PROT16:
1943 case X86EMUL_MODE_PROT32:
1944 case X86EMUL_MODE_PROT64:
1946 /* Protected mode interrupts are not implemented yet */
1947 return X86EMUL_UNHANDLEABLE;
1951 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1953 int rc = X86EMUL_CONTINUE;
1954 unsigned long temp_eip = 0;
1955 unsigned long temp_eflags = 0;
1956 unsigned long cs = 0;
1957 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1958 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1959 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1960 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1962 /* TODO: Add stack limit check */
1964 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1966 if (rc != X86EMUL_CONTINUE)
1969 if (temp_eip & ~0xffff)
1970 return emulate_gp(ctxt, 0);
1972 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1974 if (rc != X86EMUL_CONTINUE)
1977 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1979 if (rc != X86EMUL_CONTINUE)
1982 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1984 if (rc != X86EMUL_CONTINUE)
1987 ctxt->_eip = temp_eip;
1990 if (ctxt->op_bytes == 4)
1991 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1992 else if (ctxt->op_bytes == 2) {
1993 ctxt->eflags &= ~0xffff;
1994 ctxt->eflags |= temp_eflags;
1997 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1998 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2003 static int em_iret(struct x86_emulate_ctxt *ctxt)
2005 switch(ctxt->mode) {
2006 case X86EMUL_MODE_REAL:
2007 return emulate_iret_real(ctxt);
2008 case X86EMUL_MODE_VM86:
2009 case X86EMUL_MODE_PROT16:
2010 case X86EMUL_MODE_PROT32:
2011 case X86EMUL_MODE_PROT64:
2013 /* iret from protected mode is not implemented yet */
2014 return X86EMUL_UNHANDLEABLE;
2018 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2021 unsigned short sel, old_sel;
2022 struct desc_struct old_desc, new_desc;
2023 const struct x86_emulate_ops *ops = ctxt->ops;
2024 u8 cpl = ctxt->ops->cpl(ctxt);
2026 /* Assignment of RIP may only fail in 64-bit mode */
2027 if (ctxt->mode == X86EMUL_MODE_PROT64)
2028 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2031 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2033 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2035 if (rc != X86EMUL_CONTINUE)
2038 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2039 if (rc != X86EMUL_CONTINUE) {
2040 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2041 /* assigning eip failed; restore the old cs */
2042 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2048 static int em_grp45(struct x86_emulate_ctxt *ctxt)
2050 int rc = X86EMUL_CONTINUE;
2052 switch (ctxt->modrm_reg) {
2053 case 2: /* call near abs */ {
2055 old_eip = ctxt->_eip;
2056 rc = assign_eip_near(ctxt, ctxt->src.val);
2057 if (rc != X86EMUL_CONTINUE)
2059 ctxt->src.val = old_eip;
2063 case 4: /* jmp abs */
2064 rc = assign_eip_near(ctxt, ctxt->src.val);
2066 case 5: /* jmp far */
2067 rc = em_jmp_far(ctxt);
2076 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2078 u64 old = ctxt->dst.orig_val64;
2080 if (ctxt->dst.bytes == 16)
2081 return X86EMUL_UNHANDLEABLE;
2083 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2084 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2085 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2086 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2087 ctxt->eflags &= ~EFLG_ZF;
2089 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2090 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2092 ctxt->eflags |= EFLG_ZF;
2094 return X86EMUL_CONTINUE;
2097 static int em_ret(struct x86_emulate_ctxt *ctxt)
2102 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2103 if (rc != X86EMUL_CONTINUE)
2106 return assign_eip_near(ctxt, eip);
2109 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2112 unsigned long eip, cs;
2114 int cpl = ctxt->ops->cpl(ctxt);
2115 struct desc_struct old_desc, new_desc;
2116 const struct x86_emulate_ops *ops = ctxt->ops;
2118 if (ctxt->mode == X86EMUL_MODE_PROT64)
2119 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2122 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2123 if (rc != X86EMUL_CONTINUE)
2125 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2126 if (rc != X86EMUL_CONTINUE)
2128 /* Outer-privilege level return is not implemented */
2129 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2130 return X86EMUL_UNHANDLEABLE;
2131 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
2133 if (rc != X86EMUL_CONTINUE)
2135 rc = assign_eip_far(ctxt, eip, new_desc.l);
2136 if (rc != X86EMUL_CONTINUE) {
2137 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2138 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2143 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2147 rc = em_ret_far(ctxt);
2148 if (rc != X86EMUL_CONTINUE)
2150 rsp_increment(ctxt, ctxt->src.val);
2151 return X86EMUL_CONTINUE;
2154 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2156 /* Save real source value, then compare EAX against destination. */
2157 ctxt->dst.orig_val = ctxt->dst.val;
2158 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2159 ctxt->src.orig_val = ctxt->src.val;
2160 ctxt->src.val = ctxt->dst.orig_val;
2161 fastop(ctxt, em_cmp);
2163 if (ctxt->eflags & EFLG_ZF) {
2164 /* Success: write back to memory. */
2165 ctxt->dst.val = ctxt->src.orig_val;
2167 /* Failure: write the value we saw to EAX. */
2168 ctxt->dst.type = OP_REG;
2169 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2170 ctxt->dst.val = ctxt->dst.orig_val;
2172 return X86EMUL_CONTINUE;
2175 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2177 int seg = ctxt->src2.val;
2181 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2183 rc = load_segment_descriptor(ctxt, sel, seg);
2184 if (rc != X86EMUL_CONTINUE)
2187 ctxt->dst.val = ctxt->src.val;
2192 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2193 struct desc_struct *cs, struct desc_struct *ss)
2195 cs->l = 0; /* will be adjusted later */
2196 set_desc_base(cs, 0); /* flat segment */
2197 cs->g = 1; /* 4kb granularity */
2198 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2199 cs->type = 0x0b; /* Read, Execute, Accessed */
2201 cs->dpl = 0; /* will be adjusted later */
2206 set_desc_base(ss, 0); /* flat segment */
2207 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2208 ss->g = 1; /* 4kb granularity */
2210 ss->type = 0x03; /* Read/Write, Accessed */
2211 ss->d = 1; /* 32bit stack segment */
2218 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2220 u32 eax, ebx, ecx, edx;
2223 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2224 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2225 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2226 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2229 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2231 const struct x86_emulate_ops *ops = ctxt->ops;
2232 u32 eax, ebx, ecx, edx;
2235 * syscall should always be enabled in long mode, so the check only
2236 * becomes vendor specific (via cpuid) when other modes are active...
2238 if (ctxt->mode == X86EMUL_MODE_PROT64)
2243 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2245 * Intel ("GenuineIntel")
2246 * remark: Intel CPUs only support "syscall" in 64-bit
2247 * long mode. A 64-bit guest running a 32-bit compat
2248 * app will therefore #UD. While this behaviour could be
2249 * fixed by emulating the AMD response, real AMD CPUs
2250 * can't be made to behave like Intel ones.
2252 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2253 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2254 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2257 /* AMD ("AuthenticAMD") */
2258 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2259 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2260 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2263 /* AMD ("AMDisbetter!") */
2264 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2265 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2266 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2269 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2273 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2275 const struct x86_emulate_ops *ops = ctxt->ops;
2276 struct desc_struct cs, ss;
2281 /* syscall is not available in real mode */
2282 if (ctxt->mode == X86EMUL_MODE_REAL ||
2283 ctxt->mode == X86EMUL_MODE_VM86)
2284 return emulate_ud(ctxt);
2286 if (!(em_syscall_is_enabled(ctxt)))
2287 return emulate_ud(ctxt);
2289 ops->get_msr(ctxt, MSR_EFER, &efer);
2290 setup_syscalls_segments(ctxt, &cs, &ss);
2292 if (!(efer & EFER_SCE))
2293 return emulate_ud(ctxt);
2295 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2297 cs_sel = (u16)(msr_data & 0xfffc);
2298 ss_sel = (u16)(msr_data + 8);
2300 if (efer & EFER_LMA) {
2304 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2305 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2307 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2308 if (efer & EFER_LMA) {
2309 #ifdef CONFIG_X86_64
2310 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2313 ctxt->mode == X86EMUL_MODE_PROT64 ?
2314 MSR_LSTAR : MSR_CSTAR, &msr_data);
2315 ctxt->_eip = msr_data;
2317 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2318 ctxt->eflags &= ~msr_data;
2322 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2323 ctxt->_eip = (u32)msr_data;
2325 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2328 return X86EMUL_CONTINUE;
2331 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2333 const struct x86_emulate_ops *ops = ctxt->ops;
2334 struct desc_struct cs, ss;
2339 ops->get_msr(ctxt, MSR_EFER, &efer);
2340 /* inject #GP if in real mode */
2341 if (ctxt->mode == X86EMUL_MODE_REAL)
2342 return emulate_gp(ctxt, 0);
2345 * Not recognized on AMD in compat mode (but is recognized in legacy mode).
2348 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2349 && !vendor_intel(ctxt))
2350 return emulate_ud(ctxt);
2352 /* XXX sysenter/sysexit have not been tested in 64-bit mode.
2353 * Therefore, we inject a #UD.
2355 if (ctxt->mode == X86EMUL_MODE_PROT64)
2356 return emulate_ud(ctxt);
2358 setup_syscalls_segments(ctxt, &cs, &ss);
2360 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2361 switch (ctxt->mode) {
2362 case X86EMUL_MODE_PROT32:
2363 if ((msr_data & 0xfffc) == 0x0)
2364 return emulate_gp(ctxt, 0);
2366 case X86EMUL_MODE_PROT64:
2367 if (msr_data == 0x0)
2368 return emulate_gp(ctxt, 0);
2374 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2375 cs_sel = (u16)msr_data;
2376 cs_sel &= ~SELECTOR_RPL_MASK;
2377 ss_sel = cs_sel + 8;
2378 ss_sel &= ~SELECTOR_RPL_MASK;
2379 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
2384 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2385 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2387 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2388 ctxt->_eip = msr_data;
2390 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2391 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
2393 return X86EMUL_CONTINUE;
2396 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2398 const struct x86_emulate_ops *ops = ctxt->ops;
2399 struct desc_struct cs, ss;
2400 u64 msr_data, rcx, rdx;
2402 u16 cs_sel = 0, ss_sel = 0;
2404 /* inject #GP if in real mode or Virtual 8086 mode */
2405 if (ctxt->mode == X86EMUL_MODE_REAL ||
2406 ctxt->mode == X86EMUL_MODE_VM86)
2407 return emulate_gp(ctxt, 0);
2409 setup_syscalls_segments(ctxt, &cs, &ss);
2411 if ((ctxt->rex_prefix & 0x8) != 0x0)
2412 usermode = X86EMUL_MODE_PROT64;
2414 usermode = X86EMUL_MODE_PROT32;
2416 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2417 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2421 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2423 case X86EMUL_MODE_PROT32:
2424 cs_sel = (u16)(msr_data + 16);
2425 if ((msr_data & 0xfffc) == 0x0)
2426 return emulate_gp(ctxt, 0);
2427 ss_sel = (u16)(msr_data + 24);
2429 case X86EMUL_MODE_PROT64:
2430 cs_sel = (u16)(msr_data + 32);
2431 if (msr_data == 0x0)
2432 return emulate_gp(ctxt, 0);
2433 ss_sel = cs_sel + 8;
2436 if (is_noncanonical_address(rcx) ||
2437 is_noncanonical_address(rdx))
2438 return emulate_gp(ctxt, 0);
2441 cs_sel |= SELECTOR_RPL_MASK;
2442 ss_sel |= SELECTOR_RPL_MASK;
2444 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2445 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2448 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2450 return X86EMUL_CONTINUE;
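
/*
 * Illustrative userspace sketch (not part of this file): how the SYSEXIT
 * path above derives the ring-3 CS/SS selectors from IA32_SYSENTER_CS.
 * The helper name and types are ours; the offsets mirror the switch above.
 */
#include <stdint.h>

static void sysexit_selectors(uint64_t sysenter_cs, int to_64bit,
			      uint16_t *cs_sel, uint16_t *ss_sel)
{
	*cs_sel = (uint16_t)(sysenter_cs + (to_64bit ? 32 : 16));
	*ss_sel = (uint16_t)(to_64bit ? *cs_sel + 8 : sysenter_cs + 24);
	*cs_sel |= 3;		/* SELECTOR_RPL_MASK: return at CPL 3 */
	*ss_sel |= 3;
}
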
2453 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2456 if (ctxt->mode == X86EMUL_MODE_REAL)
2458 if (ctxt->mode == X86EMUL_MODE_VM86)
2460 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2461 return ctxt->ops->cpl(ctxt) > iopl;
2464 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2467 const struct x86_emulate_ops *ops = ctxt->ops;
2468 struct desc_struct tr_seg;
2471 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2472 unsigned mask = (1 << len) - 1;
2475 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2478 if (desc_limit_scaled(&tr_seg) < 103)
2480 base = get_desc_base(&tr_seg);
2481 #ifdef CONFIG_X86_64
2482 base |= ((u64)base3) << 32;
2484 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2485 if (r != X86EMUL_CONTINUE)
2487 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2489 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2490 if (r != X86EMUL_CONTINUE)
2492 if ((perm >> bit_idx) & mask)
2497 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2503 if (emulator_bad_iopl(ctxt))
2504 if (!emulator_io_port_access_allowed(ctxt, port, len))
2507 ctxt->perm_ok = true;
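
/*
 * Illustrative sketch of the TSS I/O bitmap test implemented by
 * emulator_io_port_access_allowed() above: access to ports
 * [port, port + len) is allowed only if every corresponding bit in the
 * bitmap is clear.  Reading 16 bits at a time covers accesses that
 * straddle a byte boundary, which is why the real code fetches a u16.
 * The flat 'bitmap' array stands in for guest memory; the real code also
 * locates the bitmap via the pointer at TSS offset 102 and bounds-checks
 * against the TSS limit.
 */
#include <stdbool.h>
#include <stdint.h>

static bool io_port_allowed(const uint8_t *bitmap, uint16_t port, int len)
{
	uint16_t perm = bitmap[port / 8] | (bitmap[port / 8 + 1] << 8);
	unsigned int mask = (1u << len) - 1;

	return !((perm >> (port & 0x7)) & mask);
}
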
2512 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2513 struct tss_segment_16 *tss)
2515 tss->ip = ctxt->_eip;
2516 tss->flag = ctxt->eflags;
2517 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2518 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2519 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2520 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2521 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2522 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2523 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2524 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2526 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2527 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2528 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2529 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2530 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2533 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2534 struct tss_segment_16 *tss)
2539 ctxt->_eip = tss->ip;
2540 ctxt->eflags = tss->flag | 2;
2541 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2542 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2543 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2544 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2545 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2546 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2547 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2548 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2551 * SDM says that segment selectors are loaded before segment descriptors.
2554 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2555 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2556 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2557 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2558 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2563 * Now load segment descriptors. If a fault happens at this stage,
2564 * it is handled in the context of the new task
2566 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2568 if (ret != X86EMUL_CONTINUE)
2570 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2572 if (ret != X86EMUL_CONTINUE)
2574 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2576 if (ret != X86EMUL_CONTINUE)
2578 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2580 if (ret != X86EMUL_CONTINUE)
2582 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2584 if (ret != X86EMUL_CONTINUE)
2587 return X86EMUL_CONTINUE;
2590 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2591 u16 tss_selector, u16 old_tss_sel,
2592 ulong old_tss_base, struct desc_struct *new_desc)
2594 const struct x86_emulate_ops *ops = ctxt->ops;
2595 struct tss_segment_16 tss_seg;
2597 u32 new_tss_base = get_desc_base(new_desc);
2599 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2601 if (ret != X86EMUL_CONTINUE)
2602 /* FIXME: need to provide precise fault address */
2605 save_state_to_tss16(ctxt, &tss_seg);
2607 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2609 if (ret != X86EMUL_CONTINUE)
2610 /* FIXME: need to provide precise fault address */
2613 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2615 if (ret != X86EMUL_CONTINUE)
2616 /* FIXME: need to provide precise fault address */
2619 if (old_tss_sel != 0xffff) {
2620 tss_seg.prev_task_link = old_tss_sel;
2622 ret = ops->write_std(ctxt, new_tss_base,
2623 &tss_seg.prev_task_link,
2624 sizeof tss_seg.prev_task_link,
2626 if (ret != X86EMUL_CONTINUE)
2627 /* FIXME: need to provide precise fault address */
2631 return load_state_from_tss16(ctxt, &tss_seg);
2634 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2635 struct tss_segment_32 *tss)
2637 /* CR3 and the LDT selector are intentionally not saved */
2638 tss->eip = ctxt->_eip;
2639 tss->eflags = ctxt->eflags;
2640 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2641 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2642 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2643 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2644 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2645 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2646 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2647 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2649 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2650 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2651 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2652 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2653 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2654 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2657 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2658 struct tss_segment_32 *tss)
2663 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2664 return emulate_gp(ctxt, 0);
2665 ctxt->_eip = tss->eip;
2666 ctxt->eflags = tss->eflags | 2;
2668 /* General purpose registers */
2669 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2670 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2671 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2672 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2673 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2674 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2675 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2676 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2679 * SDM says that segment selectors are loaded before segment
2680 * descriptors. This is important because CPL checks will use CS.RPL.
2683 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2684 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2685 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2686 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2687 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2688 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2689 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2692 * If we're switching between Protected Mode and VM86, we need to make
2693 * sure to update the mode before loading the segment descriptors so
2694 * that the selectors are interpreted correctly.
2696 if (ctxt->eflags & X86_EFLAGS_VM) {
2697 ctxt->mode = X86EMUL_MODE_VM86;
2700 ctxt->mode = X86EMUL_MODE_PROT32;
2705 * Now load segment descriptors. If a fault happens at this stage,
2706 * it is handled in the context of the new task
2708 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2710 if (ret != X86EMUL_CONTINUE)
2712 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2714 if (ret != X86EMUL_CONTINUE)
2716 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2718 if (ret != X86EMUL_CONTINUE)
2720 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2722 if (ret != X86EMUL_CONTINUE)
2724 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2726 if (ret != X86EMUL_CONTINUE)
2728 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2730 if (ret != X86EMUL_CONTINUE)
2732 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2734 if (ret != X86EMUL_CONTINUE)
2737 return X86EMUL_CONTINUE;
2740 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2741 u16 tss_selector, u16 old_tss_sel,
2742 ulong old_tss_base, struct desc_struct *new_desc)
2744 const struct x86_emulate_ops *ops = ctxt->ops;
2745 struct tss_segment_32 tss_seg;
2747 u32 new_tss_base = get_desc_base(new_desc);
2748 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2749 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2751 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2753 if (ret != X86EMUL_CONTINUE)
2754 /* FIXME: need to provide precise fault address */
2757 save_state_to_tss32(ctxt, &tss_seg);
2759 /* Only GP registers and segment selectors are saved */
2760 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2761 ldt_sel_offset - eip_offset, &ctxt->exception);
2762 if (ret != X86EMUL_CONTINUE)
2763 /* FIXME: need to provide precise fault address */
2766 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2768 if (ret != X86EMUL_CONTINUE)
2769 /* FIXME: need to provide precise fault address */
2772 if (old_tss_sel != 0xffff) {
2773 tss_seg.prev_task_link = old_tss_sel;
2775 ret = ops->write_std(ctxt, new_tss_base,
2776 &tss_seg.prev_task_link,
2777 sizeof tss_seg.prev_task_link,
2779 if (ret != X86EMUL_CONTINUE)
2780 /* FIXME: need to provide precise fault address */
2784 return load_state_from_tss32(ctxt, &tss_seg);
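
/*
 * Sketch of the offsetof() arithmetic used above: on the write-back of the
 * outgoing 32-bit task, only the slice from 'eip' up to (but excluding)
 * 'ldt_selector' is stored, i.e. EIP, EFLAGS, the eight GP registers and
 * the six segment selectors.  The struct below is an abbreviated,
 * illustrative stand-in for struct tss_segment_32.
 */
#include <stddef.h>
#include <stdint.h>

struct tss32_demo {
	uint32_t prev_task_link, esp0, ss0, esp1, ss1, esp2, ss2, cr3;
	uint32_t eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;
	uint32_t es, cs, ss, ds, fs, gs, ldt_selector, t_iomap;
};

static const size_t wb_start = offsetof(struct tss32_demo, eip);
static const size_t wb_len   = offsetof(struct tss32_demo, ldt_selector) -
			       offsetof(struct tss32_demo, eip);
/* write_std(old_tss_base + wb_start, buf + wb_start, wb_len), in spirit */
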
2787 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2788 u16 tss_selector, int idt_index, int reason,
2789 bool has_error_code, u32 error_code)
2791 const struct x86_emulate_ops *ops = ctxt->ops;
2792 struct desc_struct curr_tss_desc, next_tss_desc;
2794 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2795 ulong old_tss_base =
2796 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2800 /* FIXME: old_tss_base == ~0 ? */
2802 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2803 if (ret != X86EMUL_CONTINUE)
2805 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2806 if (ret != X86EMUL_CONTINUE)
2809 /* FIXME: check that next_tss_desc is tss */
2812 * Check privileges. The three cases are task switch caused by...
2814 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2815 * 2. Exception/IRQ/iret: No check is performed
2816 * 3. jmp/call to TSS: Check against DPL of the TSS
2818 if (reason == TASK_SWITCH_GATE) {
2819 if (idt_index != -1) {
2820 /* Software interrupts */
2821 struct desc_struct task_gate_desc;
2824 ret = read_interrupt_descriptor(ctxt, idt_index,
2826 if (ret != X86EMUL_CONTINUE)
2829 dpl = task_gate_desc.dpl;
2830 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2831 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2833 } else if (reason != TASK_SWITCH_IRET) {
2834 int dpl = next_tss_desc.dpl;
2835 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2836 return emulate_gp(ctxt, tss_selector);
2840 desc_limit = desc_limit_scaled(&next_tss_desc);
2841 if (!next_tss_desc.p ||
2842 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2843 desc_limit < 0x2b)) {
2844 return emulate_ts(ctxt, tss_selector & 0xfffc);
2847 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2848 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2849 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2852 if (reason == TASK_SWITCH_IRET)
2853 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2855 /* Set the back link to the previous task only if the NT bit is set
2856 in EFLAGS; note that old_tss_sel is not used after this point */
2857 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2858 old_tss_sel = 0xffff;
2860 if (next_tss_desc.type & 8)
2861 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2862 old_tss_base, &next_tss_desc);
2864 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2865 old_tss_base, &next_tss_desc);
2866 if (ret != X86EMUL_CONTINUE)
2869 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2870 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2872 if (reason != TASK_SWITCH_IRET) {
2873 next_tss_desc.type |= (1 << 1); /* set busy flag */
2874 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2877 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2878 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2880 if (has_error_code) {
2881 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2882 ctxt->lock_prefix = 0;
2883 ctxt->src.val = (unsigned long) error_code;
2884 ret = em_push(ctxt);
2890 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2891 u16 tss_selector, int idt_index, int reason,
2892 bool has_error_code, u32 error_code)
2896 invalidate_registers(ctxt);
2897 ctxt->_eip = ctxt->eip;
2898 ctxt->dst.type = OP_NONE;
2900 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2901 has_error_code, error_code);
2903 if (rc == X86EMUL_CONTINUE) {
2904 ctxt->eip = ctxt->_eip;
2905 writeback_registers(ctxt);
2908 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2911 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2914 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2916 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2917 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
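
/*
 * Minimal sketch of the EFLAGS.DF handling in string_addr_inc() above:
 * with DF clear, a string instruction steps forward by op->bytes per
 * iteration; with DF set it steps backward.  Names are illustrative.
 */
static long string_step(unsigned long eflags, int count, int op_bytes)
{
	int df = (eflags & 0x400) ? -count : count;	/* 0x400 == EFLG_DF */

	return (long)df * op_bytes;
}
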
2920 static int em_das(struct x86_emulate_ctxt *ctxt)
2923 bool af, cf, old_cf;
2925 cf = ctxt->eflags & X86_EFLAGS_CF;
2931 af = ctxt->eflags & X86_EFLAGS_AF;
2932 if ((al & 0x0f) > 9 || af) {
2934 cf = old_cf | (al >= 250);
2939 if (old_al > 0x99 || old_cf) {
2945 /* Set PF, ZF, SF */
2946 ctxt->src.type = OP_IMM;
2948 ctxt->src.bytes = 1;
2949 fastop(ctxt, em_or);
2950 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2952 ctxt->eflags |= X86_EFLAGS_CF;
2954 ctxt->eflags |= X86_EFLAGS_AF;
2955 return X86EMUL_CONTINUE;
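
/*
 * Standalone restatement of the DAS algorithm above, with a worked
 * example: after SUB, 0x22 - 0x44 leaves AL = 0xDE with CF (borrow) and
 * AF set; das() corrects this to AL = 0x78, CF = 1, i.e. 22 - 44 = -22,
 * represented as 78 with borrow in packed BCD.
 */
#include <stdbool.h>
#include <stdint.h>

static uint8_t das(uint8_t al, bool *cf, bool *af)
{
	uint8_t old_al = al;
	bool old_cf = *cf;

	*cf = false;
	if ((al & 0x0f) > 9 || *af) {
		al -= 6;
		*cf = old_cf | (al >= 250);	/* borrow out of the low nibble */
		*af = true;
	} else {
		*af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		*cf = true;
	}
	return al;
}
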
2958 static int em_aam(struct x86_emulate_ctxt *ctxt)
2962 if (ctxt->src.val == 0)
2963 return emulate_de(ctxt);
2965 al = ctxt->dst.val & 0xff;
2966 ah = al / ctxt->src.val;
2967 al %= ctxt->src.val;
2969 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2971 /* Set PF, ZF, SF */
2972 ctxt->src.type = OP_IMM;
2974 ctxt->src.bytes = 1;
2975 fastop(ctxt, em_or);
2977 return X86EMUL_CONTINUE;
2980 static int em_aad(struct x86_emulate_ctxt *ctxt)
2982 u8 al = ctxt->dst.val & 0xff;
2983 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2985 al = (al + (ah * ctxt->src.val)) & 0xff;
2987 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2989 /* Set PF, ZF, SF */
2990 ctxt->src.type = OP_IMM;
2992 ctxt->src.bytes = 1;
2993 fastop(ctxt, em_or);
2995 return X86EMUL_CONTINUE;
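
/*
 * Worked example for the two routines above (base 10, the usual
 * immediate): AAM splits AL = 123 into AH = 12, AL = 3; AAD folds AH back
 * into AL as AH * 10 + AL = 123 and clears AH.  Any base except 0 (which
 * raises #DE) is permitted.
 */
#include <stdint.h>

static void aam(uint8_t *ah, uint8_t *al, uint8_t base)
{
	*ah = *al / base;
	*al %= base;
}

static void aad(uint8_t *ah, uint8_t *al, uint8_t base)
{
	*al = (uint8_t)(*al + *ah * base);
	*ah = 0;
}
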
2998 static int em_call(struct x86_emulate_ctxt *ctxt)
3001 long rel = ctxt->src.val;
3003 ctxt->src.val = (unsigned long)ctxt->_eip;
3004 rc = jmp_rel(ctxt, rel);
3005 if (rc != X86EMUL_CONTINUE)
3007 return em_push(ctxt);
3010 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3015 struct desc_struct old_desc, new_desc;
3016 const struct x86_emulate_ops *ops = ctxt->ops;
3017 int cpl = ctxt->ops->cpl(ctxt);
3019 old_eip = ctxt->_eip;
3020 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3022 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3023 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3025 if (rc != X86EMUL_CONTINUE)
3026 return X86EMUL_CONTINUE;
3028 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3029 if (rc != X86EMUL_CONTINUE)
3032 ctxt->src.val = old_cs;
3034 if (rc != X86EMUL_CONTINUE)
3037 ctxt->src.val = old_eip;
3039 /* If we failed, we tainted the memory, but at the very least we should restore the old CS and RIP. */
3041 if (rc != X86EMUL_CONTINUE)
3045 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3050 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3055 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3056 if (rc != X86EMUL_CONTINUE)
3058 rc = assign_eip_near(ctxt, eip);
3059 if (rc != X86EMUL_CONTINUE)
3061 rsp_increment(ctxt, ctxt->src.val);
3062 return X86EMUL_CONTINUE;
3065 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3067 /* Write back the register source. */
3068 ctxt->src.val = ctxt->dst.val;
3069 write_register_operand(&ctxt->src);
3071 /* Write back the memory destination with implicit LOCK prefix. */
3072 ctxt->dst.val = ctxt->src.orig_val;
3073 ctxt->lock_prefix = 1;
3074 return X86EMUL_CONTINUE;
3077 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3079 ctxt->dst.val = ctxt->src2.val;
3080 return fastop(ctxt, em_imul);
3083 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3085 ctxt->dst.type = OP_REG;
3086 ctxt->dst.bytes = ctxt->src.bytes;
3087 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3088 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3090 return X86EMUL_CONTINUE;
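
/*
 * Sketch of the sign-fill trick in em_cwd() above: shifting the sign bit
 * down to bit 0 yields 1 for a negative source, so (sign - 1) is 0 and
 * its complement is all ones; for a non-negative source the complement is
 * 0.  E.g. CWD with AX = 0xFFFB (-5) sets DX = 0xFFFF; AX = 5 sets DX = 0.
 */
#include <stdint.h>

static uint16_t cwd_high_word(uint16_t ax)
{
	return (uint16_t)~((ax >> 15) - 1);
}
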
3093 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3097 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3098 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3099 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3100 return X86EMUL_CONTINUE;
3103 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3107 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3108 return emulate_gp(ctxt, 0);
3109 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3110 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3111 return X86EMUL_CONTINUE;
3114 static int em_mov(struct x86_emulate_ctxt *ctxt)
3116 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3117 return X86EMUL_CONTINUE;
3120 #define FFL(x) bit(X86_FEATURE_##x)
3122 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3124 u32 ebx, ecx, edx, eax = 1;
3128 * Check MOVBE is set in the guest-visible CPUID leaf.
3130 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3131 if (!(ecx & FFL(MOVBE)))
3132 return emulate_ud(ctxt);
3134 switch (ctxt->op_bytes) {
3137 * From MOVBE definition: "...When the operand size is 16 bits,
3138 * the upper word of the destination register remains unchanged
3141 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3142 * rules, so we have to do the operation almost by hand.
3144 tmp = (u16)ctxt->src.val;
3145 ctxt->dst.val &= ~0xffffUL;
3146 ctxt->dst.val |= (unsigned long)swab16(tmp);
3149 ctxt->dst.val = swab32((u32)ctxt->src.val);
3152 ctxt->dst.val = swab64(ctxt->src.val);
3157 return X86EMUL_CONTINUE;
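
/*
 * Sketch of the 16-bit MOVBE case above: only the low word of the
 * destination is replaced with the byte-swapped source, preserving the
 * upper bits exactly as the quoted definition requires.  swab16() is
 * open-coded here to keep the example self-contained.
 */
#include <stdint.h>

static uint64_t movbe16(uint64_t dst, uint16_t src)
{
	uint16_t swapped = (uint16_t)((src << 8) | (src >> 8));

	return (dst & ~0xffffULL) | swapped;
}
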
3160 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3162 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3163 return emulate_gp(ctxt, 0);
3165 /* Disable writeback. */
3166 ctxt->dst.type = OP_NONE;
3167 return X86EMUL_CONTINUE;
3170 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3174 if (ctxt->mode == X86EMUL_MODE_PROT64)
3175 val = ctxt->src.val & ~0ULL;
3177 val = ctxt->src.val & ~0U;
3179 /* #UD condition is already handled. */
3180 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3181 return emulate_gp(ctxt, 0);
3183 /* Disable writeback. */
3184 ctxt->dst.type = OP_NONE;
3185 return X86EMUL_CONTINUE;
3188 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3192 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3193 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3194 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3195 return emulate_gp(ctxt, 0);
3197 return X86EMUL_CONTINUE;
3200 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3204 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3205 return emulate_gp(ctxt, 0);
3207 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3208 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3209 return X86EMUL_CONTINUE;
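
/*
 * Sketch of the EDX:EAX convention shared by em_wrmsr()/em_rdmsr() above
 * (and by RDTSC/RDPMC earlier): a 64-bit value travels split across two
 * 32-bit register halves.
 */
#include <stdint.h>

static uint64_t edx_eax_join(uint32_t eax, uint32_t edx)
{
	return (uint64_t)eax | ((uint64_t)edx << 32);
}

static void edx_eax_split(uint64_t val, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)val;
	*edx = (uint32_t)(val >> 32);
}
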
3212 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3214 if (ctxt->modrm_reg > VCPU_SREG_GS)
3215 return emulate_ud(ctxt);
3217 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3218 return X86EMUL_CONTINUE;
3221 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3223 u16 sel = ctxt->src.val;
3225 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3226 return emulate_ud(ctxt);
3228 if (ctxt->modrm_reg == VCPU_SREG_SS)
3229 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3231 /* Disable writeback. */
3232 ctxt->dst.type = OP_NONE;
3233 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3236 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3238 u16 sel = ctxt->src.val;
3240 /* Disable writeback. */
3241 ctxt->dst.type = OP_NONE;
3242 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3245 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3247 u16 sel = ctxt->src.val;
3249 /* Disable writeback. */
3250 ctxt->dst.type = OP_NONE;
3251 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3254 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3259 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3260 if (rc == X86EMUL_CONTINUE)
3261 ctxt->ops->invlpg(ctxt, linear);
3262 /* Disable writeback. */
3263 ctxt->dst.type = OP_NONE;
3264 return X86EMUL_CONTINUE;
3267 static int em_clts(struct x86_emulate_ctxt *ctxt)
3271 cr0 = ctxt->ops->get_cr(ctxt, 0);
3273 ctxt->ops->set_cr(ctxt, 0, cr0);
3274 return X86EMUL_CONTINUE;
3277 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3279 int rc = ctxt->ops->fix_hypercall(ctxt);
3281 if (rc != X86EMUL_CONTINUE)
3284 /* Let the processor re-execute the fixed hypercall */
3285 ctxt->_eip = ctxt->eip;
3286 /* Disable writeback. */
3287 ctxt->dst.type = OP_NONE;
3288 return X86EMUL_CONTINUE;
3291 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3292 void (*get)(struct x86_emulate_ctxt *ctxt,
3293 struct desc_ptr *ptr))
3295 struct desc_ptr desc_ptr;
3297 if (ctxt->mode == X86EMUL_MODE_PROT64)
3299 get(ctxt, &desc_ptr);
3300 if (ctxt->op_bytes == 2) {
3302 desc_ptr.address &= 0x00ffffff;
3304 /* Disable writeback. */
3305 ctxt->dst.type = OP_NONE;
3306 return segmented_write(ctxt, ctxt->dst.addr.mem,
3307 &desc_ptr, 2 + ctxt->op_bytes);
3310 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3312 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3315 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3317 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3320 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3322 struct desc_ptr desc_ptr;
3325 if (ctxt->mode == X86EMUL_MODE_PROT64)
3327 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3328 &desc_ptr.size, &desc_ptr.address,
3330 if (rc != X86EMUL_CONTINUE)
3332 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3333 /* Disable writeback. */
3334 ctxt->dst.type = OP_NONE;
3335 return X86EMUL_CONTINUE;
3338 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3342 rc = ctxt->ops->fix_hypercall(ctxt);
3344 /* Disable writeback. */
3345 ctxt->dst.type = OP_NONE;
3349 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3351 struct desc_ptr desc_ptr;
3354 if (ctxt->mode == X86EMUL_MODE_PROT64)
3356 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3357 &desc_ptr.size, &desc_ptr.address,
3359 if (rc != X86EMUL_CONTINUE)
3361 ctxt->ops->set_idt(ctxt, &desc_ptr);
3362 /* Disable writeback. */
3363 ctxt->dst.type = OP_NONE;
3364 return X86EMUL_CONTINUE;
3367 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3369 if (ctxt->dst.type == OP_MEM)
3370 ctxt->dst.bytes = 2;
3371 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3372 return X86EMUL_CONTINUE;
3375 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3377 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3378 | (ctxt->src.val & 0x0f));
3379 ctxt->dst.type = OP_NONE;
3380 return X86EMUL_CONTINUE;
3383 static int em_loop(struct x86_emulate_ctxt *ctxt)
3385 int rc = X86EMUL_CONTINUE;
3387 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3388 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3389 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3390 rc = jmp_rel(ctxt, ctxt->src.val);
3395 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3397 int rc = X86EMUL_CONTINUE;
3399 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3400 rc = jmp_rel(ctxt, ctxt->src.val);
3405 static int em_in(struct x86_emulate_ctxt *ctxt)
3407 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3409 return X86EMUL_IO_NEEDED;
3411 return X86EMUL_CONTINUE;
3414 static int em_out(struct x86_emulate_ctxt *ctxt)
3416 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3418 /* Disable writeback. */
3419 ctxt->dst.type = OP_NONE;
3420 return X86EMUL_CONTINUE;
3423 static int em_cli(struct x86_emulate_ctxt *ctxt)
3425 if (emulator_bad_iopl(ctxt))
3426 return emulate_gp(ctxt, 0);
3428 ctxt->eflags &= ~X86_EFLAGS_IF;
3429 return X86EMUL_CONTINUE;
3432 static int em_sti(struct x86_emulate_ctxt *ctxt)
3434 if (emulator_bad_iopl(ctxt))
3435 return emulate_gp(ctxt, 0);
3437 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3438 ctxt->eflags |= X86_EFLAGS_IF;
3439 return X86EMUL_CONTINUE;
3442 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3444 u32 eax, ebx, ecx, edx;
3446 eax = reg_read(ctxt, VCPU_REGS_RAX);
3447 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3448 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3449 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3450 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3451 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3452 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3453 return X86EMUL_CONTINUE;
3456 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3460 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3461 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3463 ctxt->eflags &= ~0xffUL;
3464 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3465 return X86EMUL_CONTINUE;
3468 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3470 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3471 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3472 return X86EMUL_CONTINUE;
3475 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3477 switch (ctxt->op_bytes) {
3478 #ifdef CONFIG_X86_64
3480 asm("bswap %0" : "+r"(ctxt->dst.val));
3484 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3487 return X86EMUL_CONTINUE;
3490 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3492 /* emulating clflush regardless of cpuid */
3493 return X86EMUL_CONTINUE;
3496 static bool valid_cr(int nr)
3508 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3510 if (!valid_cr(ctxt->modrm_reg))
3511 return emulate_ud(ctxt);
3513 return X86EMUL_CONTINUE;
3516 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3518 u64 new_val = ctxt->src.val64;
3519 int cr = ctxt->modrm_reg;
3522 static u64 cr_reserved_bits[] = {
3523 0xffffffff00000000ULL,
3524 0, 0, 0, /* CR3 checked later */
3531 return emulate_ud(ctxt);
3533 if (new_val & cr_reserved_bits[cr])
3534 return emulate_gp(ctxt, 0);
3539 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3540 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3541 return emulate_gp(ctxt, 0);
3543 cr4 = ctxt->ops->get_cr(ctxt, 4);
3544 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3546 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3547 !(cr4 & X86_CR4_PAE))
3548 return emulate_gp(ctxt, 0);
3555 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3556 if (efer & EFER_LMA)
3557 rsvd = CR3_L_MODE_RESERVED_BITS;
3560 return emulate_gp(ctxt, 0);
3565 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3567 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3568 return emulate_gp(ctxt, 0);
3574 return X86EMUL_CONTINUE;
3577 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3581 ctxt->ops->get_dr(ctxt, 7, &dr7);
3583 /* Check if DR7.GD (general detect enable, bit 13) is set */
3584 return dr7 & (1 << 13);
3587 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3589 int dr = ctxt->modrm_reg;
3593 return emulate_ud(ctxt);
3595 cr4 = ctxt->ops->get_cr(ctxt, 4);
3596 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3597 return emulate_ud(ctxt);
3599 if (check_dr7_gd(ctxt))
3600 return emulate_db(ctxt);
3602 return X86EMUL_CONTINUE;
3605 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3607 u64 new_val = ctxt->src.val64;
3608 int dr = ctxt->modrm_reg;
3610 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3611 return emulate_gp(ctxt, 0);
3613 return check_dr_read(ctxt);
3616 static int check_svme(struct x86_emulate_ctxt *ctxt)
3620 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3622 if (!(efer & EFER_SVME))
3623 return emulate_ud(ctxt);
3625 return X86EMUL_CONTINUE;
3628 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3630 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3632 /* Valid physical address? */
3633 if (rax & 0xffff000000000000ULL)
3634 return emulate_gp(ctxt, 0);
3636 return check_svme(ctxt);
3639 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3641 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3643 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3644 return emulate_ud(ctxt);
3646 return X86EMUL_CONTINUE;
3649 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3651 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3652 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3654 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3655 ctxt->ops->check_pmc(ctxt, rcx))
3656 return emulate_gp(ctxt, 0);
3658 return X86EMUL_CONTINUE;
3661 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3663 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3664 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3665 return emulate_gp(ctxt, 0);
3667 return X86EMUL_CONTINUE;
3670 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3672 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3673 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3674 return emulate_gp(ctxt, 0);
3676 return X86EMUL_CONTINUE;
3679 #define D(_y) { .flags = (_y) }
3680 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3681 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3682 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3683 #define N D(NotImpl)
3684 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3685 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3686 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3687 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3688 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3689 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3690 #define II(_f, _e, _i) \
3691 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3692 #define IIP(_f, _e, _i, _p) \
3693 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3694 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3695 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3697 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3698 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3699 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3700 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3701 #define I2bvIP(_f, _e, _i, _p) \
3702 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3704 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3705 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3706 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
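
/*
 * Worked expansion of one helper, for illustration: the F6ALU(Lock, em_add)
 * entry used in opcode_table[] below is equivalent to spelling out the six
 * classic ALU encodings for ADD (opcodes 0x00-0x05) by hand.  This demo
 * array assumes the flag and em_add definitions from earlier in the file
 * and is otherwise unused:
 */
static const struct opcode f6alu_add_demo[] __attribute__((unused)) = {
	F(Lock | DstMem | SrcReg | ModRM | ByteOp, em_add), /* 00: add r/m8, r8 */
	F(Lock | DstMem | SrcReg | ModRM, em_add),	/* 01: add r/m, r */
	F(DstReg | SrcMem | ModRM | ByteOp, em_add),	/* 02: add r8, r/m8 */
	F(DstReg | SrcMem | ModRM, em_add),		/* 03: add r, r/m */
	F(DstAcc | SrcImm | ByteOp, em_add),		/* 04: add al, imm8 */
	F(DstAcc | SrcImm, em_add),			/* 05: add ax/eax, imm */
};
/* Note how & ~Lock strips LOCK legality from the forms whose destination
 * is a register or the accumulator, where a LOCK prefix is not allowed. */
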
3708 static const struct opcode group7_rm0[] = {
3710 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3714 static const struct opcode group7_rm1[] = {
3715 DI(SrcNone | Priv, monitor),
3716 DI(SrcNone | Priv, mwait),
3720 static const struct opcode group7_rm3[] = {
3721 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3722 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3723 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3724 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3725 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3726 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3727 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3728 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3731 static const struct opcode group7_rm7[] = {
3733 DIP(SrcNone, rdtscp, check_rdtsc),
3737 static const struct opcode group1[] = {
3739 F(Lock | PageTable, em_or),
3742 F(Lock | PageTable, em_and),
3748 static const struct opcode group1A[] = {
3749 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3752 static const struct opcode group2[] = {
3753 F(DstMem | ModRM, em_rol),
3754 F(DstMem | ModRM, em_ror),
3755 F(DstMem | ModRM, em_rcl),
3756 F(DstMem | ModRM, em_rcr),
3757 F(DstMem | ModRM, em_shl),
3758 F(DstMem | ModRM, em_shr),
3759 F(DstMem | ModRM, em_shl), /* /6: undocumented alias of /4 (SHL) */
3760 F(DstMem | ModRM, em_sar),
3763 static const struct opcode group3[] = {
3764 F(DstMem | SrcImm | NoWrite, em_test),
3765 F(DstMem | SrcImm | NoWrite, em_test),
3766 F(DstMem | SrcNone | Lock, em_not),
3767 F(DstMem | SrcNone | Lock, em_neg),
3768 F(DstXacc | Src2Mem, em_mul_ex),
3769 F(DstXacc | Src2Mem, em_imul_ex),
3770 F(DstXacc | Src2Mem, em_div_ex),
3771 F(DstXacc | Src2Mem, em_idiv_ex),
3774 static const struct opcode group4[] = {
3775 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3776 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3780 static const struct opcode group5[] = {
3781 F(DstMem | SrcNone | Lock, em_inc),
3782 F(DstMem | SrcNone | Lock, em_dec),
3783 I(SrcMem | Stack, em_grp45),
3784 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3785 I(SrcMem | Stack, em_grp45),
3786 I(SrcMemFAddr | ImplicitOps, em_grp45),
3787 I(SrcMem | Stack, em_grp45), D(Undefined),
3790 static const struct opcode group6[] = {
3793 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3794 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3798 static const struct group_dual group7 = { {
3799 II(Mov | DstMem, em_sgdt, sgdt),
3800 II(Mov | DstMem, em_sidt, sidt),
3801 II(SrcMem | Priv, em_lgdt, lgdt),
3802 II(SrcMem | Priv, em_lidt, lidt),
3803 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3804 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3805 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3809 N, EXT(0, group7_rm3),
3810 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3811 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3815 static const struct opcode group8[] = {
3817 F(DstMem | SrcImmByte | NoWrite, em_bt),
3818 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3819 F(DstMem | SrcImmByte | Lock, em_btr),
3820 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3823 static const struct group_dual group9 = { {
3824 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3826 N, N, N, N, N, N, N, N,
3829 static const struct opcode group11[] = {
3830 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3834 static const struct gprefix pfx_0f_ae_7 = {
3835 I(SrcMem | ByteOp, em_clflush), N, N, N,
3838 static const struct group_dual group15 = { {
3839 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3841 N, N, N, N, N, N, N, N,
3844 static const struct gprefix pfx_0f_6f_0f_7f = {
3845 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3848 static const struct gprefix pfx_0f_2b = {
3849 I(0, em_mov), I(0, em_mov), N, N,
3852 static const struct gprefix pfx_0f_28_0f_29 = {
3853 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3856 static const struct gprefix pfx_0f_e7 = {
3857 N, I(Sse, em_mov), N, N,
3860 static const struct escape escape_d9 = { {
3861 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3864 N, N, N, N, N, N, N, N,
3866 N, N, N, N, N, N, N, N,
3868 N, N, N, N, N, N, N, N,
3870 N, N, N, N, N, N, N, N,
3872 N, N, N, N, N, N, N, N,
3874 N, N, N, N, N, N, N, N,
3876 N, N, N, N, N, N, N, N,
3878 N, N, N, N, N, N, N, N,
3881 static const struct escape escape_db = { {
3882 N, N, N, N, N, N, N, N,
3885 N, N, N, N, N, N, N, N,
3887 N, N, N, N, N, N, N, N,
3889 N, N, N, N, N, N, N, N,
3891 N, N, N, N, N, N, N, N,
3893 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3895 N, N, N, N, N, N, N, N,
3897 N, N, N, N, N, N, N, N,
3899 N, N, N, N, N, N, N, N,
3902 static const struct escape escape_dd = { {
3903 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3906 N, N, N, N, N, N, N, N,
3908 N, N, N, N, N, N, N, N,
3910 N, N, N, N, N, N, N, N,
3912 N, N, N, N, N, N, N, N,
3914 N, N, N, N, N, N, N, N,
3916 N, N, N, N, N, N, N, N,
3918 N, N, N, N, N, N, N, N,
3920 N, N, N, N, N, N, N, N,
3923 static const struct opcode opcode_table[256] = {
3925 F6ALU(Lock, em_add),
3926 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3927 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3929 F6ALU(Lock | PageTable, em_or),
3930 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3933 F6ALU(Lock, em_adc),
3934 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3935 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3937 F6ALU(Lock, em_sbb),
3938 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3939 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3941 F6ALU(Lock | PageTable, em_and), N, N,
3943 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3945 F6ALU(Lock, em_xor), N, N,
3947 F6ALU(NoWrite, em_cmp), N, N,
3949 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3951 X8(I(SrcReg | Stack, em_push)),
3953 X8(I(DstReg | Stack, em_pop)),
3955 I(ImplicitOps | Stack | No64, em_pusha),
3956 I(ImplicitOps | Stack | No64, em_popa),
3957 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3960 I(SrcImm | Mov | Stack, em_push),
3961 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3962 I(SrcImmByte | Mov | Stack, em_push),
3963 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3964 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3965 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3969 G(ByteOp | DstMem | SrcImm, group1),
3970 G(DstMem | SrcImm, group1),
3971 G(ByteOp | DstMem | SrcImm | No64, group1),
3972 G(DstMem | SrcImmByte, group1),
3973 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3974 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3976 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3977 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3978 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3979 D(ModRM | SrcMem | NoAccess | DstReg),
3980 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3983 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3985 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3986 I(SrcImmFAddr | No64, em_call_far), N,
3987 II(ImplicitOps | Stack, em_pushf, pushf),
3988 II(ImplicitOps | Stack, em_popf, popf),
3989 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3991 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3992 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3993 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3994 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3996 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3997 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3998 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3999 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
4001 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4003 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4005 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4006 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
4007 I(ImplicitOps | Stack, em_ret),
4008 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4009 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4010 G(ByteOp, group11), G(0, group11),
4012 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4013 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4014 I(ImplicitOps | Stack, em_ret_far),
4015 D(ImplicitOps), DI(SrcImmByte, intn),
4016 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4018 G(Src2One | ByteOp, group2), G(Src2One, group2),
4019 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4020 I(DstAcc | SrcImmUByte | No64, em_aam),
4021 I(DstAcc | SrcImmUByte | No64, em_aad),
4022 F(DstAcc | ByteOp | No64, em_salc),
4023 I(DstAcc | SrcXLat | ByteOp, em_mov),
4025 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4027 X3(I(SrcImmByte, em_loop)),
4028 I(SrcImmByte, em_jcxz),
4029 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4030 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4032 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
4033 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
4034 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4035 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4037 N, DI(ImplicitOps, icebp), N, N,
4038 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4039 G(ByteOp, group3), G(0, group3),
4041 D(ImplicitOps), D(ImplicitOps),
4042 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4043 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4046 static const struct opcode twobyte_table[256] = {
4048 G(0, group6), GD(0, &group7), N, N,
4049 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4050 II(ImplicitOps | Priv, em_clts, clts), N,
4051 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4052 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4054 N, N, N, N, N, N, N, N,
4055 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4056 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4058 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4059 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4060 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4062 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4065 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4066 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4067 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4070 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4071 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4072 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4073 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4074 I(ImplicitOps | EmulateOnUD, em_sysenter),
4075 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4077 N, N, N, N, N, N, N, N,
4079 X16(D(DstReg | SrcMem | ModRM)),
4081 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4086 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4091 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4095 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4097 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4098 II(ImplicitOps, em_cpuid, cpuid),
4099 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4100 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4101 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4103 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4104 DI(ImplicitOps, rsm),
4105 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4106 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4107 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4108 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4110 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4111 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4112 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4113 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4114 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4115 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4119 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4120 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4121 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4123 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4124 N, D(DstMem | SrcReg | ModRM | Mov),
4125 N, N, N, GD(0, &group9),
4127 X8(I(DstReg, em_bswap)),
4129 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4131 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4132 N, N, N, N, N, N, N, N,
4134 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4137 static const struct gprefix three_byte_0f_38_f0 = {
4138 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4141 static const struct gprefix three_byte_0f_38_f1 = {
4142 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4146 * Insns below are selected by the prefix, which is indexed by the third opcode byte.
4149 static const struct opcode opcode_map_0f_38[256] = {
4151 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4153 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4155 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4156 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4175 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4179 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4185 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4186 unsigned size, bool sign_extension)
4188 int rc = X86EMUL_CONTINUE;
4192 op->addr.mem.ea = ctxt->_eip;
4193 /* NB. Immediates are sign-extended as necessary. */
4194 switch (op->bytes) {
4196 op->val = insn_fetch(s8, ctxt);
4199 op->val = insn_fetch(s16, ctxt);
4202 op->val = insn_fetch(s32, ctxt);
4205 op->val = insn_fetch(s64, ctxt);
4208 if (!sign_extension) {
4209 switch (op->bytes) {
4217 op->val &= 0xffffffff;
4225 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4228 int rc = X86EMUL_CONTINUE;
4232 decode_register_operand(ctxt, op);
4235 rc = decode_imm(ctxt, op, 1, false);
4238 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4242 if (ctxt->d & BitOp)
4243 fetch_bit_operand(ctxt);
4244 op->orig_val = op->val;
4247 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4251 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4252 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4253 fetch_register_operand(op);
4254 op->orig_val = op->val;
4258 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4259 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4260 fetch_register_operand(op);
4261 op->orig_val = op->val;
4264 if (ctxt->d & ByteOp) {
4269 op->bytes = ctxt->op_bytes;
4270 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4271 fetch_register_operand(op);
4272 op->orig_val = op->val;
4276 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4278 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4279 op->addr.mem.seg = VCPU_SREG_ES;
4286 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4287 fetch_register_operand(op);
4291 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4294 rc = decode_imm(ctxt, op, 1, true);
4301 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4304 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4307 ctxt->memop.bytes = 1;
4308 if (ctxt->memop.type == OP_REG) {
4309 ctxt->memop.addr.reg = decode_register(ctxt,
4310 ctxt->modrm_rm, true);
4311 fetch_register_operand(&ctxt->memop);
4315 ctxt->memop.bytes = 2;
4318 ctxt->memop.bytes = 4;
4321 rc = decode_imm(ctxt, op, 2, false);
4324 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4328 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4330 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4331 op->addr.mem.seg = ctxt->seg_override;
4337 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4339 register_address(ctxt,
4340 reg_read(ctxt, VCPU_REGS_RBX) +
4341 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4342 op->addr.mem.seg = ctxt->seg_override;
4347 op->addr.mem.ea = ctxt->_eip;
4348 op->bytes = ctxt->op_bytes + 2;
4349 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4352 ctxt->memop.bytes = ctxt->op_bytes + 2;
4355 op->val = VCPU_SREG_ES;
4358 op->val = VCPU_SREG_CS;
4361 op->val = VCPU_SREG_SS;
4364 op->val = VCPU_SREG_DS;
4367 op->val = VCPU_SREG_FS;
4370 op->val = VCPU_SREG_GS;
4373 /* Special instructions do their own operand decoding. */
4375 op->type = OP_NONE; /* Disable writeback. */
4383 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4385 int rc = X86EMUL_CONTINUE;
4386 int mode = ctxt->mode;
4387 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4388 bool op_prefix = false;
4389 bool has_seg_override = false;
4390 struct opcode opcode;
4392 ctxt->memop.type = OP_NONE;
4393 ctxt->memopp = NULL;
4394 ctxt->_eip = ctxt->eip;
4395 ctxt->fetch.ptr = ctxt->fetch.data;
4396 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4397 ctxt->opcode_len = 1;
4399 memcpy(ctxt->fetch.data, insn, insn_len);
4401 rc = __do_insn_fetch_bytes(ctxt, 1);
4402 if (rc != X86EMUL_CONTINUE)
4407 case X86EMUL_MODE_REAL:
4408 case X86EMUL_MODE_VM86:
4409 case X86EMUL_MODE_PROT16:
4410 def_op_bytes = def_ad_bytes = 2;
4412 case X86EMUL_MODE_PROT32:
4413 def_op_bytes = def_ad_bytes = 4;
4415 #ifdef CONFIG_X86_64
4416 case X86EMUL_MODE_PROT64:
4422 return EMULATION_FAILED;
4425 ctxt->op_bytes = def_op_bytes;
4426 ctxt->ad_bytes = def_ad_bytes;
4428 /* Legacy prefixes. */
4430 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4431 case 0x66: /* operand-size override */
4433 /* switch between 2/4 bytes */
4434 ctxt->op_bytes = def_op_bytes ^ 6;
4436 case 0x67: /* address-size override */
4437 if (mode == X86EMUL_MODE_PROT64)
4438 /* switch between 4/8 bytes */
4439 ctxt->ad_bytes = def_ad_bytes ^ 12;
4441 /* switch between 2/4 bytes */
4442 ctxt->ad_bytes = def_ad_bytes ^ 6;
4444 case 0x26: /* ES override */
4445 case 0x2e: /* CS override */
4446 case 0x36: /* SS override */
4447 case 0x3e: /* DS override */
4448 has_seg_override = true;
4449 ctxt->seg_override = (ctxt->b >> 3) & 3;
4451 case 0x64: /* FS override */
4452 case 0x65: /* GS override */
4453 has_seg_override = true;
4454 ctxt->seg_override = ctxt->b & 7;
4456 case 0x40 ... 0x4f: /* REX */
4457 if (mode != X86EMUL_MODE_PROT64)
4459 ctxt->rex_prefix = ctxt->b;
4461 case 0xf0: /* LOCK */
4462 ctxt->lock_prefix = 1;
4464 case 0xf2: /* REPNE/REPNZ */
4465 case 0xf3: /* REP/REPE/REPZ */
4466 ctxt->rep_prefix = ctxt->b;
4472 /* Any legacy prefix after a REX prefix nullifies its effect. */
4474 ctxt->rex_prefix = 0;
4480 if (ctxt->rex_prefix & 8)
4481 ctxt->op_bytes = 8; /* REX.W */
4483 /* Opcode byte(s). */
4484 opcode = opcode_table[ctxt->b];
4485 /* Two-byte opcode? */
4486 if (ctxt->b == 0x0f) {
4487 ctxt->opcode_len = 2;
4488 ctxt->b = insn_fetch(u8, ctxt);
4489 opcode = twobyte_table[ctxt->b];
4491 /* 0F_38 opcode map */
4492 if (ctxt->b == 0x38) {
4493 ctxt->opcode_len = 3;
4494 ctxt->b = insn_fetch(u8, ctxt);
4495 opcode = opcode_map_0f_38[ctxt->b];
4498 ctxt->d = opcode.flags;
4500 if (ctxt->d & ModRM)
4501 ctxt->modrm = insn_fetch(u8, ctxt);
4503 /* vex-prefix instructions are not implemented */
4504 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4505 (mode == X86EMUL_MODE_PROT64 ||
4506 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4510 while (ctxt->d & GroupMask) {
4511 switch (ctxt->d & GroupMask) {
4513 goffset = (ctxt->modrm >> 3) & 7;
4514 opcode = opcode.u.group[goffset];
4517 goffset = (ctxt->modrm >> 3) & 7;
4518 if ((ctxt->modrm >> 6) == 3)
4519 opcode = opcode.u.gdual->mod3[goffset];
4521 opcode = opcode.u.gdual->mod012[goffset];
4524 goffset = ctxt->modrm & 7;
4525 opcode = opcode.u.group[goffset];
4528 if (ctxt->rep_prefix && op_prefix)
4529 return EMULATION_FAILED;
4530 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4531 switch (simd_prefix) {
4532 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4533 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4534 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4535 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4539 if (ctxt->modrm > 0xbf)
4540 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4542 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4545 return EMULATION_FAILED;
4548 ctxt->d &= ~(u64)GroupMask;
4549 ctxt->d |= opcode.flags;
4554 return EMULATION_FAILED;
4556 ctxt->execute = opcode.u.execute;
4558 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4559 return EMULATION_FAILED;
4561 if (unlikely(ctxt->d &
4562 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
4564 * These are copied unconditionally here, and checked unconditionally
4565 * in x86_emulate_insn.
4567 ctxt->check_perm = opcode.check_perm;
4568 ctxt->intercept = opcode.intercept;
4570 if (ctxt->d & NotImpl)
4571 return EMULATION_FAILED;
4573 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4576 if (ctxt->d & Op3264) {
4577 if (mode == X86EMUL_MODE_PROT64)
4584 ctxt->op_bytes = 16;
4585 else if (ctxt->d & Mmx)
4589 /* ModRM and SIB bytes. */
4590 if (ctxt->d & ModRM) {
4591 rc = decode_modrm(ctxt, &ctxt->memop);
4592 if (!has_seg_override) {
4593 has_seg_override = true;
4594 ctxt->seg_override = ctxt->modrm_seg;
4596 } else if (ctxt->d & MemAbs)
4597 rc = decode_abs(ctxt, &ctxt->memop);
4598 if (rc != X86EMUL_CONTINUE)
4601 if (!has_seg_override)
4602 ctxt->seg_override = VCPU_SREG_DS;
4604 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4607 * Decode and fetch the source operand: register, memory
4610 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4611 if (rc != X86EMUL_CONTINUE)
4615 * Decode and fetch the second source operand: register, memory
4618 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4619 if (rc != X86EMUL_CONTINUE)
4622 /* Decode and fetch the destination operand: register or memory. */
4623 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4625 if (ctxt->rip_relative)
4626 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4629 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
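
/*
 * Aside on the prefix handling inside x86_decode_insn() above: because the
 * legal sizes are powers of two, a single XOR toggles between them.
 * def_op_bytes ^ 6 maps 2 <-> 4 (0x66 prefix), and def_ad_bytes ^ 12 maps
 * 4 <-> 8 (0x67 prefix in long mode).
 */
#include <assert.h>

static void size_toggle_demo(void)
{
	assert((2 ^ 6) == 4 && (4 ^ 6) == 2);	/* operand-size override */
	assert((4 ^ 12) == 8 && (8 ^ 12) == 4);	/* long-mode address-size override */
}
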
4632 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4634 return ctxt->d & PageTable;
4637 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4639 /* The second termination condition applies only to REPE
4640 * and REPNE. Test whether the repeat string operation prefix is
4641 * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
4642 * termination condition:
4643 * - if REPE/REPZ and ZF = 0 then done
4644 * - if REPNE/REPNZ and ZF = 1 then done
4646 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4647 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4648 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4649 ((ctxt->eflags & EFLG_ZF) == 0))
4650 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4651 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4657 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4661 ctxt->ops->get_fpu(ctxt);
4662 asm volatile("1: fwait \n\t"
4664 ".pushsection .fixup,\"ax\" \n\t"
4666 "movb $1, %[fault] \n\t"
4669 _ASM_EXTABLE(1b, 3b)
4670 : [fault]"+qm"(fault));
4671 ctxt->ops->put_fpu(ctxt);
4673 if (unlikely(fault))
4674 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4676 return X86EMUL_CONTINUE;
4679 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4682 if (op->type == OP_MM)
4683 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4686 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4688 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4689 if (!(ctxt->d & ByteOp))
4690 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4691 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4692 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4694 : "c"(ctxt->src2.val));
4695 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4696 if (!fop) /* exception is returned in fop variable */
4697 return emulate_de(ctxt);
4698 return X86EMUL_CONTINUE;
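
/*
 * Sketch of the size dispatch in fastop() above: each fastop is laid out
 * as consecutive fixed-size code stubs, one per operand width (byte,
 * word, long, quad).  For non-byte ops, log2 of the destination size
 * picks the stub: 2 -> +1, 4 -> +2, 8 -> +3 units of FASTOP_SIZE.
 * __ffs() is modelled with a compiler builtin here; names are
 * illustrative.
 */
static void *fastop_select(void *base, unsigned int dst_bytes, int byteop,
			   unsigned int fastop_size)
{
	if (!byteop)
		base = (char *)base + __builtin_ctz(dst_bytes) * fastop_size;
	return base;
}
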
4701 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4703 memset(&ctxt->rip_relative, 0,
4704 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4706 ctxt->io_read.pos = 0;
4707 ctxt->io_read.end = 0;
4708 ctxt->mem_read.end = 0;
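
/*
 * Sketch of the partial-clear idiom in init_decode_cache() above: every
 * per-instruction decode field laid out between 'rip_relative' and
 * 'modrm' in struct x86_emulate_ctxt is zeroed by one memset() over that
 * address range, while fields outside it survive or are reset by hand.
 * The struct below is a purely illustrative stand-in.
 */
#include <string.h>

struct decode_demo {
	int kept_before;	/* not touched by the clear */
	char span_start;	/* first member cleared */
	int scratch[4];
	int span_end;		/* first member NOT cleared */
};

static void demo_clear(struct decode_demo *d)
{
	memset(&d->span_start, 0,
	       (char *)&d->span_end - (char *)&d->span_start);
}
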
4711 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4713 const struct x86_emulate_ops *ops = ctxt->ops;
4714 int rc = X86EMUL_CONTINUE;
4715 int saved_dst_type = ctxt->dst.type;
4717 ctxt->mem_read.pos = 0;
4719 /* LOCK prefix is allowed only with some instructions */
4720 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4721 rc = emulate_ud(ctxt);
4725 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4726 rc = emulate_ud(ctxt);
4730 if (unlikely(ctxt->d &
4731 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4732 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4733 (ctxt->d & Undefined)) {
4734 rc = emulate_ud(ctxt);
4738 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4739 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4740 rc = emulate_ud(ctxt);
4744 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4745 rc = emulate_nm(ctxt);
4749 if (ctxt->d & Mmx) {
4750 rc = flush_pending_x87_faults(ctxt);
4751 if (rc != X86EMUL_CONTINUE)
4754 * Now that we know the fpu is exception safe, we can fetch operands from it.
4757 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4758 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4759 if (!(ctxt->d & Mov))
4760 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instructions can be executed only at CPL=0. */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode. */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction-specific permission checks. */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes share the same first termination condition. */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}
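	/* Operand-fetch stage: read any source operands from guest memory. */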
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* Optimisation: avoid the slow emulated read if Mov. */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;
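/*
 * Instructions with an ImplicitOps destination jump straight here,
 * bypassing the destination read above.
 */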
special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;
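	/*
	 * Execute stage: most opcodes carry an ->execute callback (or a
	 * fastop stub); only the remaining legacy cases fall through to
	 * the opcode switches below.
	 */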
	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f:	/* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d:		/* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97:	/* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;	/* nop */
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98:		/* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9:		/* jmp rel */
	case 0xeb:		/* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* Complement the carry flag. */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8:		/* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9:		/* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc:		/* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd:		/* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}
	if (rc != X86EMUL_CONTINUE)
		goto done;
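/*
 * Writeback stage: commit results to registers or guest memory,
 * advance the string index registers, and decide whether a REP
 * instruction has to be restarted.
 */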
writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;

		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
					   -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the PIO read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache. This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}
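	/* The instruction has fully completed: commit the advanced RIP. */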
	ctxt->eip = ctxt->_eip;
done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
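/*
 * Two-byte (0x0f-prefixed) opcodes without an ->execute callback are
 * handled inline here.
 */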
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20:		/* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21:		/* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x80 ... 0x8f:	/* jcc rel (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val
						       : (s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val
						      : (u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}
threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
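/*
 * Register-cache helpers for the caller. The emulator reads guest
 * GPRs lazily and marks them dirty on write; the caller invalidates
 * the cache before reusing the context and flushes it when emulation
 * finishes outside the normal completion paths above.
 */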
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}