/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
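
/*
 * A decode-table entry packs several OpXXX descriptors into one u64 of
 * flags.  With OpBits == 5, each descriptor occupies a 5-bit field, and
 * extracting one is a shift and mask, roughly:
 *
 *	src2_type = (ctxt->d >> Src2Shift) & OpMask;
 *
 * (the Dst/Src/Src2 shift values are defined below).
 */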
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28) /* Invalid in 64-bit mode */
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
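
/*
 * These helpers replicate a table entry, e.g. X4(op) expands to
 * "op, op, op, op"; they keep the opcode tables readable whenever a
 * whole run of opcodes decodes identically.
 */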
192 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
193 #define FASTOP_SIZE 8
196 * fastop functions have a special calling convention:
201 * flags: rflags (in/out)
202 * ex: rsi (in:fastop pointer, out:zero if exception)
204 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
205 * different operand sizes can be reached by calculation, rather than a jump
206 * table (which would be bigger than the code).
208 * fastop functions are declared as taking a never-defined fastop parameter,
209 * so they can't be called from C directly.
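
/*
 * Because every variant is FASTOP_SIZE (8) bytes and is emitted in
 * byte/word/long/quad order, selecting a variant is plain address
 * arithmetic; the dispatch amounts to:
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * so a 2-byte operand lands on em_op + 8, a 4-byte one on em_op + 16
 * and an 8-byte one on em_op + 24.
 */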
struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END
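
/*
 * FASTOP1(not), for instance, emits an em_not entry followed by
 * "notb %al", "notw %ax", "notl %eax" and (on 64-bit) "notq %rax"
 * stubs, each padded to FASTOP_SIZE bytes.
 */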
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp  2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
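
/*
 * Callers wrap guest-triggerable instructions that may fault, roughly:
 *
 *	rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 *
 * turning an unexpected host-side fault into X86EMUL_UNHANDLEABLE.
 */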
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
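
/*
 * E.g. a descriptor with g=1 and limit=0xfffff scales to 0xffffffff
 * (4 GiB - 1): page granularity multiplies the limit by 4 KiB and fills
 * in the low 12 bits.
 */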
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;

	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}
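
/*
 * Note the "15UL ^ cur_size" above: cur_size can never exceed 15, and for
 * values 0..15 an XOR with 15 equals 15 - cur_size, i.e. the number of
 * bytes still permitted under the 15-byte instruction-length limit.
 */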
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
					       ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
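
/*
 * Worked example: with 16-bit addressing, modrm 0x42 (mod=01, rm=010)
 * decodes to [BP + SI + disp8] with an SS segment default; with 32-bit
 * addressing, rm=100 pulls in a SIB byte, and rm=101 with mod=00 is a
 * plain disp32 (RIP-relative in 64-bit mode, as handled above).
 */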
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
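
/*
 * E.g. "bt %cx, mem" with CX = 100 and a 2-byte operand: mask = ~15, so
 * sv = 96 and the effective address advances by 96 >> 3 = 12 bytes, while
 * the in-word bit offset becomes 100 & 15 = 4.
 */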
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	ulong addr;
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
						 ((u64)base3 << 32), ctxt))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's obviously wrong for MOV and POP).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	return edx & bit(X86_FEATURE_LM);
}
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}
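
/*
 * These bit positions mirror the attribute layout of a descriptor's high
 * dword (type at bits 8-11, S at 12, DPL at 13-14, P at 15, AVL/L/D/G at
 * 20-23), so the flags words in the SMM state-save area can be applied
 * verbatim.
 */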
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smstate, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
	base3 =                   GET_SMSTATE(u32, smstate, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}
#endif

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}
	}

	return X86EMUL_CONTINUE;
}
2440 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2441 const char *smstate)
2443 struct desc_struct desc;
2446 u32 val, cr0, cr3, cr4;
2449 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2450 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2451 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2452 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2454 for (i = 0; i < 8; i++)
2455 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2457 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2458 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2459 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2460 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2462 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2463 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2464 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2465 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2466 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2468 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2469 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2470 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2471 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2472 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2474 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2475 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2476 ctxt->ops->set_gdt(ctxt, &dt);
2478 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2479 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2480 ctxt->ops->set_idt(ctxt, &dt);
2482 for (i = 0; i < 6; i++) {
2483 int r = rsm_load_seg_32(ctxt, smstate, i);
2484 if (r != X86EMUL_CONTINUE)
2485 return r;
2488 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2490 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2492 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
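/*
 * 64-bit SMRAM layout: the sixteen GPRs are stored contiguously, RAX
 * at 0x7ff8 down to R15 at 0x7f80, which is why the loop below reads
 * GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8).
 */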
2495 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2496 const char *smstate)
2498 struct desc_struct desc;
2499 struct desc_ptr dt;
2500 u64 val, cr0, cr3, cr4;
2501 u32 base3;
2502 u16 selector;
2503 int i, r;
2505 for (i = 0; i < 16; i++)
2506 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2508 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2509 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2511 val = GET_SMSTATE(u32, smstate, 0x7f68);
2512 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2513 val = GET_SMSTATE(u32, smstate, 0x7f60);
2514 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2516 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2517 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2518 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2519 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2520 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2521 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2523 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2524 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2525 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2526 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2527 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2528 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2530 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2531 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2532 ctxt->ops->set_idt(ctxt, &dt);
2534 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2535 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2536 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2537 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2538 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2539 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2541 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2542 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2543 ctxt->ops->set_gdt(ctxt, &dt);
2545 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2546 if (r != X86EMUL_CONTINUE)
2547 return r;
2549 for (i = 0; i < 6; i++) {
2550 r = rsm_load_seg_64(ctxt, smstate, i);
2551 if (r != X86EMUL_CONTINUE)
2552 return r;
2555 return X86EMUL_CONTINUE;
2558 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2560 unsigned long cr0, cr4, efer;
2561 char buf[512];
2562 u64 smbase;
2563 int ret;
2565 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2566 return emulate_ud(ctxt);
2568 smbase = ctxt->ops->get_smbase(ctxt);
2570 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2571 if (ret != X86EMUL_CONTINUE)
2572 return X86EMUL_UNHANDLEABLE;
2575 * Get back to real mode, to prepare a safe state in which to load
2576 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2577 * supports long mode.
2579 cr4 = ctxt->ops->get_cr(ctxt, 4);
2580 if (emulator_has_longmode(ctxt)) {
2581 struct desc_struct cs_desc;
2583 /* Zero CR4.PCIDE before CR0.PG. */
2584 if (cr4 & X86_CR4_PCIDE) {
2585 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2586 cr4 &= ~X86_CR4_PCIDE;
2589 /* A 32-bit code segment is required to clear EFER.LMA. */
2590 memset(&cs_desc, 0, sizeof(cs_desc));
2591 cs_desc.type = 0xb;
2592 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2593 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2596 /* For the 64-bit case, this will clear EFER.LMA. */
2597 cr0 = ctxt->ops->get_cr(ctxt, 0);
2598 if (cr0 & X86_CR0_PE)
2599 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2601 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
2602 if (cr4 & X86_CR4_PAE)
2603 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2605 /* And finally go back to 32-bit mode. */
2606 efer = 0;
2607 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2610 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2611 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2612 * state-save area.
2614 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2615 return X86EMUL_UNHANDLEABLE;
2617 if (emulator_has_longmode(ctxt))
2618 ret = rsm_load_state_64(ctxt, buf);
2619 else
2620 ret = rsm_load_state_32(ctxt, buf);
2622 if (ret != X86EMUL_CONTINUE) {
2623 /* FIXME: should triple fault */
2624 return X86EMUL_UNHANDLEABLE;
2627 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2628 ctxt->ops->set_nmi_mask(ctxt, false);
2630 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2631 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2632 return X86EMUL_CONTINUE;
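/*
 * SYSCALL/SYSENTER assume flat 4GB code and stack segments; the helper
 * below builds those fixed descriptors, while the selectors themselves
 * are derived from MSR_STAR or MSR_IA32_SYSENTER_CS by the callers.
 */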
2635 static void
2636 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2637 struct desc_struct *cs, struct desc_struct *ss)
2639 cs->l = 0; /* will be adjusted later */
2640 set_desc_base(cs, 0); /* flat segment */
2641 cs->g = 1; /* 4kb granularity */
2642 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2643 cs->type = 0x0b; /* Read, Execute, Accessed */
2644 cs->s = 1;
2645 cs->dpl = 0; /* will be adjusted later */
2646 cs->p = 1;
2647 cs->d = 1;
2648 cs->avl = 0;
2650 set_desc_base(ss, 0); /* flat segment */
2651 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2652 ss->g = 1; /* 4kb granularity */
2653 ss->s = 1;
2654 ss->type = 0x03; /* Read/Write, Accessed */
2655 ss->d = 1; /* 32bit stack segment */
2656 ss->dpl = 0;
2657 ss->p = 1;
2658 ss->l = 0;
2659 ss->avl = 0;
2662 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2664 u32 eax, ebx, ecx, edx;
2666 eax = ecx = 0;
2667 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2668 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2669 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2670 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2673 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2675 const struct x86_emulate_ops *ops = ctxt->ops;
2676 u32 eax, ebx, ecx, edx;
2679 * syscall should always be enabled in long mode - so the check only
2680 * becomes vendor specific (via cpuid) if other modes are active...
2682 if (ctxt->mode == X86EMUL_MODE_PROT64)
2683 return true;
2685 eax = 0x00000000;
2686 ecx = 0x00000000;
2687 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2689 * Intel ("GenuineIntel")
2690 * remark: Intel CPUs only support "syscall" in 64-bit
2691 * long mode. A 64-bit guest running a 32-bit compat-app
2692 * will therefore #UD! While this behaviour could be fixed
2693 * (by emulating an AMD-style response), AMD CPUs can't be
2694 * made to behave like Intel CPUs.
2696 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2697 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2698 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2699 return false;
2701 /* AMD ("AuthenticAMD") */
2702 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2703 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2704 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2705 return true;
2707 /* AMD ("AMDisbetter!") */
2708 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2709 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2710 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2711 return true;
2713 /* Hygon ("HygonGenuine") */
2714 if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
2715 ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
2716 edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
2717 return true;
2720 * default: (not Intel, not AMD, not Hygon), apply Intel's
2721 * stricter rules...
2723 return false;
2726 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2728 const struct x86_emulate_ops *ops = ctxt->ops;
2729 struct desc_struct cs, ss;
2730 u64 msr_data;
2731 u16 cs_sel, ss_sel;
2732 u64 efer = 0;
2734 /* syscall is not available in real mode */
2735 if (ctxt->mode == X86EMUL_MODE_REAL ||
2736 ctxt->mode == X86EMUL_MODE_VM86)
2737 return emulate_ud(ctxt);
2739 if (!(em_syscall_is_enabled(ctxt)))
2740 return emulate_ud(ctxt);
2742 ops->get_msr(ctxt, MSR_EFER, &efer);
2743 setup_syscalls_segments(ctxt, &cs, &ss);
2745 if (!(efer & EFER_SCE))
2746 return emulate_ud(ctxt);
2748 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2749 msr_data >>= 32;
2750 cs_sel = (u16)(msr_data & 0xfffc);
2751 ss_sel = (u16)(msr_data + 8);
2753 if (efer & EFER_LMA) {
2754 cs.d = 0;
2755 cs.l = 1;
2757 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2758 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2760 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2761 if (efer & EFER_LMA) {
2762 #ifdef CONFIG_X86_64
2763 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2765 ops->get_msr(ctxt,
2766 ctxt->mode == X86EMUL_MODE_PROT64 ?
2767 MSR_LSTAR : MSR_CSTAR, &msr_data);
2768 ctxt->_eip = msr_data;
2770 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2771 ctxt->eflags &= ~msr_data;
2772 ctxt->eflags |= X86_EFLAGS_FIXED;
2773 #endif
2774 } else {
2775 /* legacy mode */
2776 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2777 ctxt->_eip = (u32)msr_data;
2779 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2782 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2783 return X86EMUL_CONTINUE;
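/*
 * SYSENTER pulls its targets from the MSR triple SYSENTER_CS/EIP/ESP;
 * CS comes from SYSENTER_CS with the RPL bits forced to 0 and SS is
 * architecturally CS + 8, which is what the selector arithmetic in
 * em_sysenter() below implements.
 */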
2786 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2788 const struct x86_emulate_ops *ops = ctxt->ops;
2789 struct desc_struct cs, ss;
2790 u64 msr_data;
2791 u16 cs_sel, ss_sel;
2792 u64 efer = 0;
2794 ops->get_msr(ctxt, MSR_EFER, &efer);
2795 /* inject #GP if in real mode */
2796 if (ctxt->mode == X86EMUL_MODE_REAL)
2797 return emulate_gp(ctxt, 0);
2800 * Not recognized on AMD in compat mode (but is recognized in legacy
2801 * mode).
2803 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2804 && !vendor_intel(ctxt))
2805 return emulate_ud(ctxt);
2807 /* sysenter/sysexit have not been tested in 64bit mode. */
2808 if (ctxt->mode == X86EMUL_MODE_PROT64)
2809 return X86EMUL_UNHANDLEABLE;
2811 setup_syscalls_segments(ctxt, &cs, &ss);
2813 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2814 if ((msr_data & 0xfffc) == 0x0)
2815 return emulate_gp(ctxt, 0);
2817 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2818 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2819 ss_sel = cs_sel + 8;
2820 if (efer & EFER_LMA) {
2821 cs.d = 0;
2822 cs.l = 1;
2825 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2826 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2828 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2829 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2831 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2832 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2833 (u32)msr_data;
2835 return X86EMUL_CONTINUE;
2838 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2840 const struct x86_emulate_ops *ops = ctxt->ops;
2841 struct desc_struct cs, ss;
2842 u64 msr_data, rcx, rdx;
2843 int usermode;
2844 u16 cs_sel = 0, ss_sel = 0;
2846 /* inject #GP if in real mode or Virtual 8086 mode */
2847 if (ctxt->mode == X86EMUL_MODE_REAL ||
2848 ctxt->mode == X86EMUL_MODE_VM86)
2849 return emulate_gp(ctxt, 0);
2851 setup_syscalls_segments(ctxt, &cs, &ss);
2853 if ((ctxt->rex_prefix & 0x8) != 0x0)
2854 usermode = X86EMUL_MODE_PROT64;
2855 else
2856 usermode = X86EMUL_MODE_PROT32;
2858 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2859 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2861 cs.dpl = 3;
2862 ss.dpl = 3;
2863 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2864 switch (usermode) {
2865 case X86EMUL_MODE_PROT32:
2866 cs_sel = (u16)(msr_data + 16);
2867 if ((msr_data & 0xfffc) == 0x0)
2868 return emulate_gp(ctxt, 0);
2869 ss_sel = (u16)(msr_data + 24);
2870 rcx = (u32)rcx;
2871 rdx = (u32)rdx;
2872 break;
2873 case X86EMUL_MODE_PROT64:
2874 cs_sel = (u16)(msr_data + 32);
2875 if (msr_data == 0x0)
2876 return emulate_gp(ctxt, 0);
2877 ss_sel = cs_sel + 8;
2878 cs.d = 0;
2879 cs.l = 1;
2880 if (emul_is_noncanonical_address(rcx, ctxt) ||
2881 emul_is_noncanonical_address(rdx, ctxt))
2882 return emulate_gp(ctxt, 0);
2883 break;
2885 cs_sel |= SEGMENT_RPL_MASK;
2886 ss_sel |= SEGMENT_RPL_MASK;
2888 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2889 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2891 ctxt->_eip = rdx;
2892 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2894 return X86EMUL_CONTINUE;
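/*
 * IOPL occupies bits 12-13 of EFLAGS; direct port I/O is permitted
 * only when CPL <= IOPL, hence the comparison below. Real mode always
 * passes, while VM86 always defers to the TSS I/O bitmap check.
 */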
2897 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2899 int iopl;
2900 if (ctxt->mode == X86EMUL_MODE_REAL)
2901 return false;
2902 if (ctxt->mode == X86EMUL_MODE_VM86)
2903 return true;
2904 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2905 return ctxt->ops->cpl(ctxt) > iopl;
2908 #define VMWARE_PORT_VMPORT (0x5658)
2909 #define VMWARE_PORT_VMRPC (0x5659)
2911 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2912 u16 port, u16 len)
2914 const struct x86_emulate_ops *ops = ctxt->ops;
2915 struct desc_struct tr_seg;
2916 u32 base3;
2917 int r;
2918 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2919 unsigned mask = (1 << len) - 1;
2920 unsigned long base;
2923 * VMware allows access to these ports even if denied
2924 * by the TSS I/O permission bitmap. Mimic behavior.
2926 if (enable_vmware_backdoor &&
2927 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2928 return true;
2930 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2931 if (!tr_seg.p)
2932 return false;
2933 if (desc_limit_scaled(&tr_seg) < 103)
2934 return false;
2935 base = get_desc_base(&tr_seg);
2936 #ifdef CONFIG_X86_64
2937 base |= ((u64)base3) << 32;
2938 #endif
2939 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2940 if (r != X86EMUL_CONTINUE)
2941 return false;
2942 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2943 return false;
2944 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2945 if (r != X86EMUL_CONTINUE)
2946 return false;
2947 if ((perm >> bit_idx) & mask)
2948 return false;
2950 return true;
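/*
 * Worked example (illustrative): for port 0x3f8 with len 1, the code
 * above reads the u16 at base + io_bitmap_ptr + 0x7f; bit_idx is 0 and
 * mask is 1, so access is denied iff bit 0 of that byte pair is set.
 */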
2952 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2953 u16 port, u16 len)
2955 if (ctxt->perm_ok)
2956 return true;
2958 if (emulator_bad_iopl(ctxt))
2959 if (!emulator_io_port_access_allowed(ctxt, port, len))
2960 return false;
2962 ctxt->perm_ok = true;
2964 return true;
2967 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2970 * Intel CPUs mask the counter and pointers in quite a strange
2971 * manner when ECX is zero due to REP-string optimizations.
2973 #ifdef CONFIG_X86_64
2974 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2975 return;
2977 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2979 switch (ctxt->b) {
2980 case 0xa4: /* movsb */
2981 case 0xa5: /* movsd/w */
2982 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2983 /* fall through */
2984 case 0xaa: /* stosb */
2985 case 0xab: /* stosd/w */
2986 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2988 #endif
2991 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2992 struct tss_segment_16 *tss)
2994 tss->ip = ctxt->_eip;
2995 tss->flag = ctxt->eflags;
2996 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2997 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2998 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2999 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3000 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3001 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3002 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3003 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3005 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3006 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3007 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3008 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3009 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3012 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3013 struct tss_segment_16 *tss)
3015 int ret;
3016 u8 cpl;
3018 ctxt->_eip = tss->ip;
3019 ctxt->eflags = tss->flag | 2;
3020 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3021 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3022 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3023 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3024 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3025 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3026 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3027 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3030 * SDM says that segment selectors are loaded before segment
3031 * descriptors
3033 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3034 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3035 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3036 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3037 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3039 cpl = tss->cs & 3;
3042 * Now load segment descriptors. If a fault happens at this stage
3043 * it is handled in the context of the new task.
3045 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3046 X86_TRANSFER_TASK_SWITCH, NULL);
3047 if (ret != X86EMUL_CONTINUE)
3048 return ret;
3049 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3050 X86_TRANSFER_TASK_SWITCH, NULL);
3051 if (ret != X86EMUL_CONTINUE)
3052 return ret;
3053 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3054 X86_TRANSFER_TASK_SWITCH, NULL);
3055 if (ret != X86EMUL_CONTINUE)
3056 return ret;
3057 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3058 X86_TRANSFER_TASK_SWITCH, NULL);
3059 if (ret != X86EMUL_CONTINUE)
3060 return ret;
3061 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3062 X86_TRANSFER_TASK_SWITCH, NULL);
3063 if (ret != X86EMUL_CONTINUE)
3064 return ret;
3066 return X86EMUL_CONTINUE;
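/*
 * task_switch_16() below follows the hardware sequence: save the
 * outgoing context into the old TSS, fetch the new TSS, optionally
 * chain the old selector into prev_task_link (for CALL/gate switches),
 * then load the incoming context.
 */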
3069 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3070 u16 tss_selector, u16 old_tss_sel,
3071 ulong old_tss_base, struct desc_struct *new_desc)
3073 struct tss_segment_16 tss_seg;
3074 int ret;
3075 u32 new_tss_base = get_desc_base(new_desc);
3077 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3078 if (ret != X86EMUL_CONTINUE)
3079 return ret;
3081 save_state_to_tss16(ctxt, &tss_seg);
3083 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3084 if (ret != X86EMUL_CONTINUE)
3085 return ret;
3087 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3088 if (ret != X86EMUL_CONTINUE)
3089 return ret;
3091 if (old_tss_sel != 0xffff) {
3092 tss_seg.prev_task_link = old_tss_sel;
3094 ret = linear_write_system(ctxt, new_tss_base,
3095 &tss_seg.prev_task_link,
3096 sizeof(tss_seg.prev_task_link));
3097 if (ret != X86EMUL_CONTINUE)
3098 return ret;
3101 return load_state_from_tss16(ctxt, &tss_seg);
3104 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3105 struct tss_segment_32 *tss)
3107 /* CR3 and the LDT selector are intentionally not saved */
3108 tss->eip = ctxt->_eip;
3109 tss->eflags = ctxt->eflags;
3110 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3111 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3112 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3113 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3114 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3115 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3116 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3117 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3119 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3120 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3121 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3122 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3123 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3124 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3127 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3128 struct tss_segment_32 *tss)
3130 int ret;
3131 u8 cpl;
3133 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3134 return emulate_gp(ctxt, 0);
3135 ctxt->_eip = tss->eip;
3136 ctxt->eflags = tss->eflags | 2;
3138 /* General purpose registers */
3139 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3140 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3141 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3142 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3143 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3144 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3145 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3146 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3149 * SDM says that segment selectors are loaded before segment
3150 * descriptors. This is important because CPL checks will
3151 * use CS.RPL.
3153 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3154 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3155 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3156 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3157 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3158 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3159 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3162 * If we're switching between Protected Mode and VM86, we need to make
3163 * sure to update the mode before loading the segment descriptors so
3164 * that the selectors are interpreted correctly.
3166 if (ctxt->eflags & X86_EFLAGS_VM) {
3167 ctxt->mode = X86EMUL_MODE_VM86;
3168 cpl = 3;
3169 } else {
3170 ctxt->mode = X86EMUL_MODE_PROT32;
3171 cpl = tss->cs & 3;
3175 * Now load segment descriptors. If a fault happens at this stage
3176 * it is handled in the context of the new task.
3178 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3179 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3180 if (ret != X86EMUL_CONTINUE)
3181 return ret;
3182 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3183 X86_TRANSFER_TASK_SWITCH, NULL);
3184 if (ret != X86EMUL_CONTINUE)
3185 return ret;
3186 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3187 X86_TRANSFER_TASK_SWITCH, NULL);
3188 if (ret != X86EMUL_CONTINUE)
3189 return ret;
3190 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3191 X86_TRANSFER_TASK_SWITCH, NULL);
3192 if (ret != X86EMUL_CONTINUE)
3193 return ret;
3194 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3195 X86_TRANSFER_TASK_SWITCH, NULL);
3196 if (ret != X86EMUL_CONTINUE)
3197 return ret;
3198 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3199 X86_TRANSFER_TASK_SWITCH, NULL);
3200 if (ret != X86EMUL_CONTINUE)
3201 return ret;
3202 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3203 X86_TRANSFER_TASK_SWITCH, NULL);
3205 return ret;
3208 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3209 u16 tss_selector, u16 old_tss_sel,
3210 ulong old_tss_base, struct desc_struct *new_desc)
3212 struct tss_segment_32 tss_seg;
3213 int ret;
3214 u32 new_tss_base = get_desc_base(new_desc);
3215 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3216 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3218 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3219 if (ret != X86EMUL_CONTINUE)
3220 return ret;
3222 save_state_to_tss32(ctxt, &tss_seg);
3224 /* Only GP registers and segment selectors are saved */
3225 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3226 ldt_sel_offset - eip_offset);
3227 if (ret != X86EMUL_CONTINUE)
3228 return ret;
3230 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3231 if (ret != X86EMUL_CONTINUE)
3232 return ret;
3234 if (old_tss_sel != 0xffff) {
3235 tss_seg.prev_task_link = old_tss_sel;
3237 ret = linear_write_system(ctxt, new_tss_base,
3238 &tss_seg.prev_task_link,
3239 sizeof(tss_seg.prev_task_link));
3240 if (ret != X86EMUL_CONTINUE)
3241 return ret;
3244 return load_state_from_tss32(ctxt, &tss_seg);
3247 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3248 u16 tss_selector, int idt_index, int reason,
3249 bool has_error_code, u32 error_code)
3251 const struct x86_emulate_ops *ops = ctxt->ops;
3252 struct desc_struct curr_tss_desc, next_tss_desc;
3253 int ret;
3254 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3255 ulong old_tss_base =
3256 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3257 u32 desc_limit;
3258 ulong desc_addr, dr7;
3260 /* FIXME: old_tss_base == ~0 ? */
3262 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3263 if (ret != X86EMUL_CONTINUE)
3264 return ret;
3265 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3266 if (ret != X86EMUL_CONTINUE)
3267 return ret;
3269 /* FIXME: check that next_tss_desc is tss */
3272 * Check privileges. The three cases are task switch caused by...
3274 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3275 * 2. Exception/IRQ/iret: No check is performed
3276 * 3. jmp/call to TSS/task-gate: No check is performed since the
3277 * hardware checks it before exiting.
3279 if (reason == TASK_SWITCH_GATE) {
3280 if (idt_index != -1) {
3281 /* Software interrupts */
3282 struct desc_struct task_gate_desc;
3283 int dpl;
3285 ret = read_interrupt_descriptor(ctxt, idt_index,
3286 &task_gate_desc);
3287 if (ret != X86EMUL_CONTINUE)
3288 return ret;
3290 dpl = task_gate_desc.dpl;
3291 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3292 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3296 desc_limit = desc_limit_scaled(&next_tss_desc);
3297 if (!next_tss_desc.p ||
3298 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3299 desc_limit < 0x2b)) {
3300 return emulate_ts(ctxt, tss_selector & 0xfffc);
3303 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3304 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3305 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3308 if (reason == TASK_SWITCH_IRET)
3309 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3311 /* set back link to prev task only if the NT bit is set in eflags;
3312 note that old_tss_sel is not used after this point */
3313 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3314 old_tss_sel = 0xffff;
3316 if (next_tss_desc.type & 8)
3317 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3318 old_tss_base, &next_tss_desc);
3320 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3321 old_tss_base, &next_tss_desc);
3322 if (ret != X86EMUL_CONTINUE)
3323 return ret;
3325 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3326 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3328 if (reason != TASK_SWITCH_IRET) {
3329 next_tss_desc.type |= (1 << 1); /* set busy flag */
3330 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3333 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3334 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3336 if (has_error_code) {
3337 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3338 ctxt->lock_prefix = 0;
3339 ctxt->src.val = (unsigned long) error_code;
3340 ret = em_push(ctxt);
3343 ops->get_dr(ctxt, 7, &dr7);
3344 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3346 return ret;
3349 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3350 u16 tss_selector, int idt_index, int reason,
3351 bool has_error_code, u32 error_code)
3353 int rc;
3355 invalidate_registers(ctxt);
3356 ctxt->_eip = ctxt->eip;
3357 ctxt->dst.type = OP_NONE;
3359 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3360 has_error_code, error_code);
3362 if (rc == X86EMUL_CONTINUE) {
3363 ctxt->eip = ctxt->_eip;
3364 writeback_registers(ctxt);
3367 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3370 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3371 struct operand *op)
3373 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3375 register_address_increment(ctxt, reg, df * op->bytes);
3376 op->addr.mem.ea = register_address(ctxt, reg);
3379 static int em_das(struct x86_emulate_ctxt *ctxt)
3381 u8 al, old_al;
3382 bool af, cf, old_cf;
3384 cf = ctxt->eflags & X86_EFLAGS_CF;
3385 al = ctxt->dst.val;
3387 old_al = al;
3388 old_cf = cf;
3389 cf = false;
3390 af = ctxt->eflags & X86_EFLAGS_AF;
3391 if ((al & 0x0f) > 9 || af) {
3392 al -= 6;
3393 cf = old_cf | (al >= 250);
3394 af = true;
3395 } else {
3396 af = false;
3398 if (old_al > 0x99 || old_cf) {
3399 al -= 0x60;
3400 cf = true;
3403 ctxt->dst.val = al;
3404 /* Set PF, ZF, SF */
3405 ctxt->src.type = OP_IMM;
3406 ctxt->src.val = 0;
3407 ctxt->src.bytes = 1;
3408 fastop(ctxt, em_or);
3409 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3410 if (cf)
3411 ctxt->eflags |= X86_EFLAGS_CF;
3412 if (af)
3413 ctxt->eflags |= X86_EFLAGS_AF;
3414 return X86EMUL_CONTINUE;
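/*
 * Example (illustrative): after the packed-BCD subtraction 0x52 - 0x17
 * the CPU leaves AL = 0x3b with AF set; the low-nibble branch above
 * subtracts 6, giving the correct BCD result AL = 0x35 (52 - 17 = 35).
 */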
3417 static int em_aam(struct x86_emulate_ctxt *ctxt)
3419 u8 al, ah;
3421 if (ctxt->src.val == 0)
3422 return emulate_de(ctxt);
3424 al = ctxt->dst.val & 0xff;
3425 ah = al / ctxt->src.val;
3426 al %= ctxt->src.val;
3428 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3430 /* Set PF, ZF, SF */
3431 ctxt->src.type = OP_IMM;
3432 ctxt->src.val = 0;
3433 ctxt->src.bytes = 1;
3434 fastop(ctxt, em_or);
3436 return X86EMUL_CONTINUE;
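/*
 * Example (illustrative): AAM with the default base 10 and AL = 0x3f
 * (63) yields AH = 6 and AL = 3; an immediate of 0 raises #DE, which
 * is the emulate_de() check above.
 */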
3439 static int em_aad(struct x86_emulate_ctxt *ctxt)
3441 u8 al = ctxt->dst.val & 0xff;
3442 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3444 al = (al + (ah * ctxt->src.val)) & 0xff;
3446 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3448 /* Set PF, ZF, SF */
3449 ctxt->src.type = OP_IMM;
3450 ctxt->src.val = 0;
3451 ctxt->src.bytes = 1;
3452 fastop(ctxt, em_or);
3454 return X86EMUL_CONTINUE;
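/*
 * Example (illustrative): with AH = 6, AL = 3 and the default base 10,
 * AAD computes AL = 6 * 10 + 3 = 0x3f and clears AH, matching the
 * masking of dst.val above.
 */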
3457 static int em_call(struct x86_emulate_ctxt *ctxt)
3459 int rc;
3460 long rel = ctxt->src.val;
3462 ctxt->src.val = (unsigned long)ctxt->_eip;
3463 rc = jmp_rel(ctxt, rel);
3464 if (rc != X86EMUL_CONTINUE)
3465 return rc;
3466 return em_push(ctxt);
3469 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3471 u16 sel, old_cs;
3472 ulong old_eip;
3473 int rc;
3474 struct desc_struct old_desc, new_desc;
3475 const struct x86_emulate_ops *ops = ctxt->ops;
3476 int cpl = ctxt->ops->cpl(ctxt);
3477 enum x86emul_mode prev_mode = ctxt->mode;
3479 old_eip = ctxt->_eip;
3480 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3482 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3483 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3484 X86_TRANSFER_CALL_JMP, &new_desc);
3485 if (rc != X86EMUL_CONTINUE)
3486 return rc;
3488 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3489 if (rc != X86EMUL_CONTINUE)
3490 goto fail;
3492 ctxt->src.val = old_cs;
3493 rc = em_push(ctxt);
3494 if (rc != X86EMUL_CONTINUE)
3495 goto fail;
3497 ctxt->src.val = old_eip;
3498 rc = em_push(ctxt);
3499 /* If we failed, we tainted the memory, but the very least we should
3500 restore cs */
3501 if (rc != X86EMUL_CONTINUE) {
3502 pr_warn_once("faulting far call emulation tainted memory\n")
3503 goto fail;
3505 return rc;
3506 fail:
3507 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3508 ctxt->mode = prev_mode;
3509 return rc;
3513 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3515 int rc;
3516 unsigned long eip;
3518 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3519 if (rc != X86EMUL_CONTINUE)
3520 return rc;
3521 rc = assign_eip_near(ctxt, eip);
3522 if (rc != X86EMUL_CONTINUE)
3523 return rc;
3524 rsp_increment(ctxt, ctxt->src.val);
3525 return X86EMUL_CONTINUE;
3528 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3530 /* Write back the register source. */
3531 ctxt->src.val = ctxt->dst.val;
3532 write_register_operand(&ctxt->src);
3534 /* Write back the memory destination with implicit LOCK prefix. */
3535 ctxt->dst.val = ctxt->src.orig_val;
3536 ctxt->lock_prefix = 1;
3537 return X86EMUL_CONTINUE;
3540 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3542 ctxt->dst.val = ctxt->src2.val;
3543 return fastop(ctxt, em_imul);
3546 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3548 ctxt->dst.type = OP_REG;
3549 ctxt->dst.bytes = ctxt->src.bytes;
3550 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3551 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3553 return X86EMUL_CONTINUE;
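/*
 * The expression above is a branchless sign spill: (val >> (bytes * 8
 * - 1)) is 0 or 1, so ~((x) - 1) yields all-zeros or all-ones. E.g.
 * CWD with AX = 0x8000 sets DX = 0xffff, while AX = 0x7fff sets DX = 0.
 */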
3556 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3558 u64 tsc_aux = 0;
3560 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3561 return emulate_gp(ctxt, 0);
3562 ctxt->dst.val = tsc_aux;
3563 return X86EMUL_CONTINUE;
3566 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3568 u64 tsc = 0;
3570 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3571 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3572 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3573 return X86EMUL_CONTINUE;
3576 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3578 u64 pmc;
3580 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3581 return emulate_gp(ctxt, 0);
3582 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3583 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3584 return X86EMUL_CONTINUE;
3587 static int em_mov(struct x86_emulate_ctxt *ctxt)
3589 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3590 return X86EMUL_CONTINUE;
3593 #define FFL(x) bit(X86_FEATURE_##x)
3595 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3597 u32 ebx, ecx, edx, eax = 1;
3598 u16 tmp;
3601 * Check MOVBE is set in the guest-visible CPUID leaf.
3603 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3604 if (!(ecx & FFL(MOVBE)))
3605 return emulate_ud(ctxt);
3607 switch (ctxt->op_bytes) {
3608 case 2:
3610 * From MOVBE definition: "...When the operand size is 16 bits,
3611 * the upper word of the destination register remains unchanged
3612 * ...".
3614 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3615 * rules, so we have to do the operation almost by hand.
3617 tmp = (u16)ctxt->src.val;
3618 ctxt->dst.val &= ~0xffffUL;
3619 ctxt->dst.val |= (unsigned long)swab16(tmp);
3620 break;
3621 case 4:
3622 ctxt->dst.val = swab32((u32)ctxt->src.val);
3623 break;
3624 case 8:
3625 ctxt->dst.val = swab64(ctxt->src.val);
3626 break;
3627 default:
3628 BUG();
3630 return X86EMUL_CONTINUE;
3633 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3635 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3636 return emulate_gp(ctxt, 0);
3638 /* Disable writeback. */
3639 ctxt->dst.type = OP_NONE;
3640 return X86EMUL_CONTINUE;
3643 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3645 unsigned long val;
3647 if (ctxt->mode == X86EMUL_MODE_PROT64)
3648 val = ctxt->src.val & ~0ULL;
3649 else
3650 val = ctxt->src.val & ~0U;
3652 /* #UD condition is already handled. */
3653 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3654 return emulate_gp(ctxt, 0);
3656 /* Disable writeback. */
3657 ctxt->dst.type = OP_NONE;
3658 return X86EMUL_CONTINUE;
3661 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3663 u64 msr_data;
3665 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3666 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3667 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3668 return emulate_gp(ctxt, 0);
3670 return X86EMUL_CONTINUE;
3673 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3675 u64 msr_data;
3677 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3678 return emulate_gp(ctxt, 0);
3680 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3681 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3682 return X86EMUL_CONTINUE;
3685 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3687 if (segment > VCPU_SREG_GS &&
3688 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3689 ctxt->ops->cpl(ctxt) > 0)
3690 return emulate_gp(ctxt, 0);
3692 ctxt->dst.val = get_segment_selector(ctxt, segment);
3693 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3694 ctxt->dst.bytes = 2;
3695 return X86EMUL_CONTINUE;
3698 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3700 if (ctxt->modrm_reg > VCPU_SREG_GS)
3701 return emulate_ud(ctxt);
3703 return em_store_sreg(ctxt, ctxt->modrm_reg);
3706 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3708 u16 sel = ctxt->src.val;
3710 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3711 return emulate_ud(ctxt);
3713 if (ctxt->modrm_reg == VCPU_SREG_SS)
3714 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3716 /* Disable writeback. */
3717 ctxt->dst.type = OP_NONE;
3718 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3721 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3723 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3726 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3728 u16 sel = ctxt->src.val;
3730 /* Disable writeback. */
3731 ctxt->dst.type = OP_NONE;
3732 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3735 static int em_str(struct x86_emulate_ctxt *ctxt)
3737 return em_store_sreg(ctxt, VCPU_SREG_TR);
3740 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3742 u16 sel = ctxt->src.val;
3744 /* Disable writeback. */
3745 ctxt->dst.type = OP_NONE;
3746 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3749 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3751 ulong linear;
3752 int rc;
3754 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3755 if (rc == X86EMUL_CONTINUE)
3756 ctxt->ops->invlpg(ctxt, linear);
3757 /* Disable writeback. */
3758 ctxt->dst.type = OP_NONE;
3759 return X86EMUL_CONTINUE;
3762 static int em_clts(struct x86_emulate_ctxt *ctxt)
3764 ulong cr0;
3766 cr0 = ctxt->ops->get_cr(ctxt, 0);
3767 cr0 &= ~X86_CR0_TS;
3768 ctxt->ops->set_cr(ctxt, 0, cr0);
3769 return X86EMUL_CONTINUE;
3772 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3774 int rc = ctxt->ops->fix_hypercall(ctxt);
3776 if (rc != X86EMUL_CONTINUE)
3777 return rc;
3779 /* Let the processor re-execute the fixed hypercall */
3780 ctxt->_eip = ctxt->eip;
3781 /* Disable writeback. */
3782 ctxt->dst.type = OP_NONE;
3783 return X86EMUL_CONTINUE;
3786 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3787 void (*get)(struct x86_emulate_ctxt *ctxt,
3788 struct desc_ptr *ptr))
3790 struct desc_ptr desc_ptr;
3792 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3793 ctxt->ops->cpl(ctxt) > 0)
3794 return emulate_gp(ctxt, 0);
3796 if (ctxt->mode == X86EMUL_MODE_PROT64)
3797 ctxt->op_bytes = 8;
3798 get(ctxt, &desc_ptr);
3799 if (ctxt->op_bytes == 2) {
3800 ctxt->op_bytes = 4;
3801 desc_ptr.address &= 0x00ffffff;
3803 /* Disable writeback. */
3804 ctxt->dst.type = OP_NONE;
3805 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3806 &desc_ptr, 2 + ctxt->op_bytes);
3809 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3811 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3814 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3816 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3819 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3821 struct desc_ptr desc_ptr;
3822 int rc;
3824 if (ctxt->mode == X86EMUL_MODE_PROT64)
3825 ctxt->op_bytes = 8;
3826 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3827 &desc_ptr.size, &desc_ptr.address,
3828 ctxt->op_bytes);
3829 if (rc != X86EMUL_CONTINUE)
3830 return rc;
3831 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3832 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3833 return emulate_gp(ctxt, 0);
3834 if (lgdt)
3835 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3836 else
3837 ctxt->ops->set_idt(ctxt, &desc_ptr);
3838 /* Disable writeback. */
3839 ctxt->dst.type = OP_NONE;
3840 return X86EMUL_CONTINUE;
3843 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3845 return em_lgdt_lidt(ctxt, true);
3848 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3850 return em_lgdt_lidt(ctxt, false);
3853 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3855 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3856 ctxt->ops->cpl(ctxt) > 0)
3857 return emulate_gp(ctxt, 0);
3859 if (ctxt->dst.type == OP_MEM)
3860 ctxt->dst.bytes = 2;
3861 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3862 return X86EMUL_CONTINUE;
3865 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3867 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3868 | (ctxt->src.val & 0x0f));
3869 ctxt->dst.type = OP_NONE;
3870 return X86EMUL_CONTINUE;
3873 static int em_loop(struct x86_emulate_ctxt *ctxt)
3875 int rc = X86EMUL_CONTINUE;
3877 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3878 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3879 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3880 rc = jmp_rel(ctxt, ctxt->src.val);
3882 return rc;
3885 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3887 int rc = X86EMUL_CONTINUE;
3889 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3890 rc = jmp_rel(ctxt, ctxt->src.val);
3892 return rc;
3895 static int em_in(struct x86_emulate_ctxt *ctxt)
3897 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3898 &ctxt->dst.val))
3899 return X86EMUL_IO_NEEDED;
3901 return X86EMUL_CONTINUE;
3904 static int em_out(struct x86_emulate_ctxt *ctxt)
3906 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3907 ctxt->src.val);
3908 /* Disable writeback. */
3909 ctxt->dst.type = OP_NONE;
3910 return X86EMUL_CONTINUE;
3913 static int em_cli(struct x86_emulate_ctxt *ctxt)
3915 if (emulator_bad_iopl(ctxt))
3916 return emulate_gp(ctxt, 0);
3918 ctxt->eflags &= ~X86_EFLAGS_IF;
3919 return X86EMUL_CONTINUE;
3922 static int em_sti(struct x86_emulate_ctxt *ctxt)
3924 if (emulator_bad_iopl(ctxt))
3925 return emulate_gp(ctxt, 0);
3927 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3928 ctxt->eflags |= X86_EFLAGS_IF;
3929 return X86EMUL_CONTINUE;
3932 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3934 u32 eax, ebx, ecx, edx;
3935 u64 msr = 0;
3937 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3938 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3939 ctxt->ops->cpl(ctxt)) {
3940 return emulate_gp(ctxt, 0);
3943 eax = reg_read(ctxt, VCPU_REGS_RAX);
3944 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3945 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3946 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3947 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3948 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3949 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3950 return X86EMUL_CONTINUE;
3953 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3955 u32 flags;
3957 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3958 X86_EFLAGS_SF;
3959 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3961 ctxt->eflags &= ~0xffUL;
3962 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3963 return X86EMUL_CONTINUE;
3966 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3968 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3969 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3970 return X86EMUL_CONTINUE;
3973 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3975 switch (ctxt->op_bytes) {
3976 #ifdef CONFIG_X86_64
3977 case 8:
3978 asm("bswap %0" : "+r"(ctxt->dst.val));
3979 break;
3980 #endif
3981 default:
3982 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3983 break;
3985 return X86EMUL_CONTINUE;
3988 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3990 /* emulating clflush regardless of cpuid */
3991 return X86EMUL_CONTINUE;
3994 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3996 ctxt->dst.val = (s32) ctxt->src.val;
3997 return X86EMUL_CONTINUE;
4000 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4002 u32 eax = 1, ebx, ecx = 0, edx;
4004 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4005 if (!(edx & FFL(FXSR)))
4006 return emulate_ud(ctxt);
4008 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4009 return emulate_nm(ctxt);
4012 * Don't emulate a case that should never be hit, instead of working
4013 * around a lack of fxsave64/fxrstor64 on old compilers.
4015 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4016 return X86EMUL_UNHANDLEABLE;
4018 return X86EMUL_CONTINUE;
4022 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4023 * and restore MXCSR.
4025 static size_t __fxstate_size(int nregs)
4027 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4030 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4032 bool cr4_osfxsr;
4033 if (ctxt->mode == X86EMUL_MODE_PROT64)
4034 return __fxstate_size(16);
4036 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4037 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4041 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4042 * 1) 16 bit mode
4043 * 2) 32 bit mode
4044 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
4045 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4046 * FIP and FDP.
4047 * 3) 64-bit mode with REX.W prefix
4048 * - like (2), but XMM 8-15 are being saved and restored
4049 * 4) 64-bit mode without REX.W prefix
4050 * - like (3), but FIP and FDP are 64 bit
4052 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4053 * desired result. (4) is not emulated.
4055 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4056 * and FPU DS) should match.
4058 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4060 struct fxregs_state fx_state;
4061 int rc;
4063 rc = check_fxsr(ctxt);
4064 if (rc != X86EMUL_CONTINUE)
4065 return rc;
4067 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4069 if (rc != X86EMUL_CONTINUE)
4070 return rc;
4072 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4073 fxstate_size(ctxt));
4077 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4078 * in the host registers (via FXSAVE) instead, so they won't be modified.
4079 * (preemption has to stay disabled until FXRSTOR).
4081 * Use noinline to keep the stack for other functions called by callers small.
4083 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4084 const size_t used_size)
4086 struct fxregs_state fx_tmp;
4087 int rc;
4089 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4090 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4091 __fxstate_size(16) - used_size);
4093 return rc;
4096 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4098 struct fxregs_state fx_state;
4099 int rc;
4100 size_t size;
4102 rc = check_fxsr(ctxt);
4103 if (rc != X86EMUL_CONTINUE)
4104 return rc;
4106 size = fxstate_size(ctxt);
4107 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4108 if (rc != X86EMUL_CONTINUE)
4109 return rc;
4111 if (size < __fxstate_size(16)) {
4112 rc = fxregs_fixup(&fx_state, size);
4113 if (rc != X86EMUL_CONTINUE)
4114 goto out;
4117 if (fx_state.mxcsr >> 16) {
4118 rc = emulate_gp(ctxt, 0);
4119 goto out;
4122 if (rc == X86EMUL_CONTINUE)
4123 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4125 out:
4126 return rc;
4129 static bool valid_cr(int nr)
4131 switch (nr) {
4132 case 0:
4133 case 2 ... 4:
4134 case 8:
4135 return true;
4136 default:
4137 return false;
4141 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4143 if (!valid_cr(ctxt->modrm_reg))
4144 return emulate_ud(ctxt);
4146 return X86EMUL_CONTINUE;
4149 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4151 u64 new_val = ctxt->src.val64;
4152 int cr = ctxt->modrm_reg;
4153 u64 efer = 0;
4155 static u64 cr_reserved_bits[] = {
4156 0xffffffff00000000ULL,
4157 0, 0, 0, /* CR3 checked later */
4158 CR4_RESERVED_BITS,
4159 0, 0, 0, /* CR5 - CR7 */
4160 0x8000000000000000ULL, /* CR8 */
4163 if (!valid_cr(cr))
4164 return emulate_ud(ctxt);
4166 if (new_val & cr_reserved_bits[cr])
4167 return emulate_gp(ctxt, 0);
4169 switch (cr) {
4170 case 0: {
4171 u64 cr4;
4172 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4173 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4174 return emulate_gp(ctxt, 0);
4176 cr4 = ctxt->ops->get_cr(ctxt, 4);
4177 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4179 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4180 !(cr4 & X86_CR4_PAE))
4181 return emulate_gp(ctxt, 0);
4183 break;
4185 case 3: {
4186 u64 rsvd = 0;
4188 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4189 if (efer & EFER_LMA) {
4190 u64 maxphyaddr;
4191 u32 eax, ebx, ecx, edx;
4193 eax = 0x80000008;
4194 ecx = 0;
4195 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4196 &edx, false))
4197 maxphyaddr = eax & 0xff;
4198 else
4199 maxphyaddr = 36;
4200 rsvd = rsvd_bits(maxphyaddr, 63);
4201 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4202 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4205 if (new_val & rsvd)
4206 return emulate_gp(ctxt, 0);
4208 break;
4210 case 4: {
4211 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4213 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4214 return emulate_gp(ctxt, 0);
4216 break;
4220 return X86EMUL_CONTINUE;
4223 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4225 u64 dr7;
4227 ctxt->ops->get_dr(ctxt, 7, &dr7);
4229 /* Check if DR7.Global_Enable is set */
4230 return dr7 & (1 << 13);
4233 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4235 int dr = ctxt->modrm_reg;
4236 u64 cr4;
4238 if (dr > 7)
4239 return emulate_ud(ctxt);
4241 cr4 = ctxt->ops->get_cr(ctxt, 4);
4242 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4243 return emulate_ud(ctxt);
4245 if (check_dr7_gd(ctxt)) {
4246 ulong dr6;
4248 ctxt->ops->get_dr(ctxt, 6, &dr6);
4249 dr6 &= ~DR_TRAP_BITS;
4250 dr6 |= DR6_BD | DR6_RTM;
4251 ctxt->ops->set_dr(ctxt, 6, dr6);
4252 return emulate_db(ctxt);
4255 return X86EMUL_CONTINUE;
4258 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4260 u64 new_val = ctxt->src.val64;
4261 int dr = ctxt->modrm_reg;
4263 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4264 return emulate_gp(ctxt, 0);
4266 return check_dr_read(ctxt);
4269 static int check_svme(struct x86_emulate_ctxt *ctxt)
4271 u64 efer = 0;
4273 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4275 if (!(efer & EFER_SVME))
4276 return emulate_ud(ctxt);
4278 return X86EMUL_CONTINUE;
4281 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4283 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4285 /* Valid physical address? */
4286 if (rax & 0xffff000000000000ULL)
4287 return emulate_gp(ctxt, 0);
4289 return check_svme(ctxt);
4292 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4294 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4296 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4297 return emulate_ud(ctxt);
4299 return X86EMUL_CONTINUE;
4302 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4304 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4305 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4308 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4309 * in Ring3 when CR4.PCE=0.
4311 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4312 return X86EMUL_CONTINUE;
4314 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4315 ctxt->ops->check_pmc(ctxt, rcx))
4316 return emulate_gp(ctxt, 0);
4318 return X86EMUL_CONTINUE;
4321 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4323 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4324 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4325 return emulate_gp(ctxt, 0);
4327 return X86EMUL_CONTINUE;
4330 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4332 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4333 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4334 return emulate_gp(ctxt, 0);
4336 return X86EMUL_CONTINUE;
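/*
 * The macros below build the opcode decode tables: D() declares decode
 * flags only, I()/F() attach an execute or fastop handler, DI()/II()
 * add an intercept code, the *IP variants add a permission check, and
 * G/GD/ID/MD/E/GP dispatch through group, group-dual, instruction-dual,
 * mode-dual, escape or mandatory-prefix sub-tables.
 */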
4339 #define D(_y) { .flags = (_y) }
4340 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4341 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4342 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4343 #define N D(NotImpl)
4344 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4345 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4346 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4347 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4348 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4349 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4350 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4351 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4352 #define II(_f, _e, _i) \
4353 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4354 #define IIP(_f, _e, _i, _p) \
4355 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4356 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4357 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4359 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4360 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4361 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4362 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4363 #define I2bvIP(_f, _e, _i, _p) \
4364 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4366 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4367 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4368 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
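/*
 * F6ALU() expands to the six classic encodings of an x86 ALU opcode
 * row: r/m,r and r,r/m (each in byte and word/dword form via F2bv)
 * plus the AL/eAX,imm forms, with Lock stripped where the destination
 * cannot be memory.
 */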
4370 static const struct opcode group7_rm0[] = {
4371 N,
4372 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4373 N, N, N, N, N, N,
4376 static const struct opcode group7_rm1[] = {
4377 DI(SrcNone | Priv, monitor),
4378 DI(SrcNone | Priv, mwait),
4379 N, N, N, N, N, N,
4382 static const struct opcode group7_rm3[] = {
4383 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4384 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4385 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4386 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4387 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4388 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4389 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4390 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4393 static const struct opcode group7_rm7[] = {
4394 N,
4395 DIP(SrcNone, rdtscp, check_rdtsc),
4396 N, N, N, N, N, N,
4399 static const struct opcode group1[] = {
4400 F(Lock, em_add),
4401 F(Lock | PageTable, em_or),
4402 F(Lock, em_adc),
4403 F(Lock, em_sbb),
4404 F(Lock | PageTable, em_and),
4405 F(Lock, em_sub),
4406 F(Lock, em_xor),
4407 F(NoWrite, em_cmp),
4410 static const struct opcode group1A[] = {
4411 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4414 static const struct opcode group2[] = {
4415 F(DstMem | ModRM, em_rol),
4416 F(DstMem | ModRM, em_ror),
4417 F(DstMem | ModRM, em_rcl),
4418 F(DstMem | ModRM, em_rcr),
4419 F(DstMem | ModRM, em_shl),
4420 F(DstMem | ModRM, em_shr),
4421 F(DstMem | ModRM, em_shl),
4422 F(DstMem | ModRM, em_sar),
4425 static const struct opcode group3[] = {
4426 F(DstMem | SrcImm | NoWrite, em_test),
4427 F(DstMem | SrcImm | NoWrite, em_test),
4428 F(DstMem | SrcNone | Lock, em_not),
4429 F(DstMem | SrcNone | Lock, em_neg),
4430 F(DstXacc | Src2Mem, em_mul_ex),
4431 F(DstXacc | Src2Mem, em_imul_ex),
4432 F(DstXacc | Src2Mem, em_div_ex),
4433 F(DstXacc | Src2Mem, em_idiv_ex),
4436 static const struct opcode group4[] = {
4437 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4438 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4439 N, N, N, N, N, N,
4442 static const struct opcode group5[] = {
4443 F(DstMem | SrcNone | Lock, em_inc),
4444 F(DstMem | SrcNone | Lock, em_dec),
4445 I(SrcMem | NearBranch, em_call_near_abs),
4446 I(SrcMemFAddr | ImplicitOps, em_call_far),
4447 I(SrcMem | NearBranch, em_jmp_abs),
4448 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4449 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4452 static const struct opcode group6[] = {
4453 II(Prot | DstMem, em_sldt, sldt),
4454 II(Prot | DstMem, em_str, str),
4455 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4456 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4457 N, N, N, N,
4460 static const struct group_dual group7 = { {
4461 II(Mov | DstMem, em_sgdt, sgdt),
4462 II(Mov | DstMem, em_sidt, sidt),
4463 II(SrcMem | Priv, em_lgdt, lgdt),
4464 II(SrcMem | Priv, em_lidt, lidt),
4465 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4466 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4467 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4469 EXT(0, group7_rm0),
4470 EXT(0, group7_rm1),
4471 N, EXT(0, group7_rm3),
4472 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4473 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4474 EXT(0, group7_rm7),
4477 static const struct opcode group8[] = {
4478 N, N, N, N,
4479 F(DstMem | SrcImmByte | NoWrite, em_bt),
4480 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4481 F(DstMem | SrcImmByte | Lock, em_btr),
4482 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4486 * The "memory" destination is actually always a register, since we come
4487 * from the register case of group9.
4489 static const struct gprefix pfx_0f_c7_7 = {
4490 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4494 static const struct group_dual group9 = { {
4495 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4497 N, N, N, N, N, N, N,
4498 GP(0, &pfx_0f_c7_7),
4501 static const struct opcode group11[] = {
4502 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4503 X7(D(Undefined)),
4506 static const struct gprefix pfx_0f_ae_7 = {
4507 I(SrcMem | ByteOp, em_clflush), N, N, N,
4510 static const struct group_dual group15 = { {
4511 I(ModRM | Aligned16, em_fxsave),
4512 I(ModRM | Aligned16, em_fxrstor),
4513 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4515 N, N, N, N, N, N, N, N,
4518 static const struct gprefix pfx_0f_6f_0f_7f = {
4519 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4522 static const struct instr_dual instr_dual_0f_2b = {
4523 I(0, em_mov), N
4526 static const struct gprefix pfx_0f_2b = {
4527 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4530 static const struct gprefix pfx_0f_10_0f_11 = {
4531 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4534 static const struct gprefix pfx_0f_28_0f_29 = {
4535 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4538 static const struct gprefix pfx_0f_e7 = {
4539 N, I(Sse, em_mov), N, N,
4542 static const struct escape escape_d9 = { {
4543 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4546 N, N, N, N, N, N, N, N,
4548 N, N, N, N, N, N, N, N,
4550 N, N, N, N, N, N, N, N,
4552 N, N, N, N, N, N, N, N,
4554 N, N, N, N, N, N, N, N,
4556 N, N, N, N, N, N, N, N,
4558 N, N, N, N, N, N, N, N,
4560 N, N, N, N, N, N, N, N,
4563 static const struct escape escape_db = { {
4564 N, N, N, N, N, N, N, N,
4567 N, N, N, N, N, N, N, N,
4569 N, N, N, N, N, N, N, N,
4571 N, N, N, N, N, N, N, N,
4573 N, N, N, N, N, N, N, N,
4575 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4577 N, N, N, N, N, N, N, N,
4579 N, N, N, N, N, N, N, N,
4581 N, N, N, N, N, N, N, N,
4584 static const struct escape escape_dd = { {
4585 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4588 N, N, N, N, N, N, N, N,
4590 N, N, N, N, N, N, N, N,
4592 N, N, N, N, N, N, N, N,
4594 N, N, N, N, N, N, N, N,
4596 N, N, N, N, N, N, N, N,
4598 N, N, N, N, N, N, N, N,
4600 N, N, N, N, N, N, N, N,
4602 N, N, N, N, N, N, N, N,
4605 static const struct instr_dual instr_dual_0f_c3 = {
4606 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4609 static const struct mode_dual mode_dual_63 = {
4610 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4613 static const struct opcode opcode_table[256] = {
4615 F6ALU(Lock, em_add),
4616 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4617 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4619 F6ALU(Lock | PageTable, em_or),
4620 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4623 F6ALU(Lock, em_adc),
4624 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4625 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4627 F6ALU(Lock, em_sbb),
4628 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4629 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4631 F6ALU(Lock | PageTable, em_and), N, N,
4633 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4635 F6ALU(Lock, em_xor), N, N,
4637 F6ALU(NoWrite, em_cmp), N, N,
4639 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4641 X8(I(SrcReg | Stack, em_push)),
4643 X8(I(DstReg | Stack, em_pop)),
4645 I(ImplicitOps | Stack | No64, em_pusha),
4646 I(ImplicitOps | Stack | No64, em_popa),
4647 N, MD(ModRM, &mode_dual_63),
4650 I(SrcImm | Mov | Stack, em_push),
4651 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4652 I(SrcImmByte | Mov | Stack, em_push),
4653 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4654 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4655 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4657 X16(D(SrcImmByte | NearBranch)),
4659 G(ByteOp | DstMem | SrcImm, group1),
4660 G(DstMem | SrcImm, group1),
4661 G(ByteOp | DstMem | SrcImm | No64, group1),
4662 G(DstMem | SrcImmByte, group1),
4663 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4664 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4666 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4667 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4668 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4669 D(ModRM | SrcMem | NoAccess | DstReg),
4670 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4673 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4675 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4676 I(SrcImmFAddr | No64, em_call_far), N,
4677 II(ImplicitOps | Stack, em_pushf, pushf),
4678 II(ImplicitOps | Stack, em_popf, popf),
4679 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4681 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4682 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4683 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4684 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4686 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4687 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4688 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4689 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4691 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4693 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4695 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4696 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4697 I(ImplicitOps | NearBranch, em_ret),
4698 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4699 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4700 G(ByteOp, group11), G(0, group11),
4702 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4703 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4704 I(ImplicitOps, em_ret_far),
4705 D(ImplicitOps), DI(SrcImmByte, intn),
4706 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4708 G(Src2One | ByteOp, group2), G(Src2One, group2),
4709 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4710 I(DstAcc | SrcImmUByte | No64, em_aam),
4711 I(DstAcc | SrcImmUByte | No64, em_aad),
4712 F(DstAcc | ByteOp | No64, em_salc),
4713 I(DstAcc | SrcXLat | ByteOp, em_mov),
4715 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4717 X3(I(SrcImmByte | NearBranch, em_loop)),
4718 I(SrcImmByte | NearBranch, em_jcxz),
4719 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4720 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4722 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4723 I(SrcImmFAddr | No64, em_jmp_far),
4724 D(SrcImmByte | ImplicitOps | NearBranch),
4725 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4726 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4728 N, DI(ImplicitOps, icebp), N, N,
4729 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4730 G(ByteOp, group3), G(0, group3),
4732 D(ImplicitOps), D(ImplicitOps),
4733 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4734 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4737 static const struct opcode twobyte_table[256] = {
4739 G(0, group6), GD(0, &group7), N, N,
4740 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4741 II(ImplicitOps | Priv, em_clts, clts), N,
4742 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4743 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4745 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4746 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4748 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4749 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4751 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4752 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4753 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, check_cr_write),
4755 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, check_dr_write),
4758 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4759 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4760 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4763 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4764 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4765 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4766 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4767 I(ImplicitOps | EmulateOnUD, em_sysenter),
4768 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4770 N, N, N, N, N, N, N, N,
4772 X16(D(DstReg | SrcMem | ModRM)),
4774 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4779 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4784 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4786 X16(D(SrcImm | NearBranch)),
4788 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4790 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4791 II(ImplicitOps, em_cpuid, cpuid),
4792 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4793 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4794 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4796 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4797 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4798 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4799 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4800 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4801 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4803 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4804 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4805 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4806 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4807 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4808 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4812 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4813 I(DstReg | SrcMem | ModRM, em_bsf_c),
4814 I(DstReg | SrcMem | ModRM, em_bsr_c),
4815 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4817 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4818 N, ID(0, &instr_dual_0f_c3),
4819 N, N, N, GD(0, &group9),
4821 X8(I(DstReg, em_bswap)),
4823 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4825 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4826 N, N, N, N, N, N, N, N,
4828 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4831 static const struct instr_dual instr_dual_0f_38_f0 = {
4832 I(DstReg | SrcMem | Mov, em_movbe), N
4835 static const struct instr_dual instr_dual_0f_38_f1 = {
4836 I(DstMem | SrcReg | Mov, em_movbe), N
4839 static const struct gprefix three_byte_0f_38_f0 = {
4840 ID(0, &instr_dual_0f_38_f0), N, N, N
4843 static const struct gprefix three_byte_0f_38_f1 = {
4844 ID(0, &instr_dual_0f_38_f1), N, N, N
4848 * The instructions below are selected by the third opcode byte and,
4849 * within each entry, by the mandatory (SIMD) prefix.
4850 */
4851 static const struct opcode opcode_map_0f_38[256] = {
4853 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4855 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4857 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4858 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
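/*
 * E.g. the byte sequence 0f 38 f0 indexes opcode_map_0f_38[0xf0]; the
 * gprefix there selects the no-prefix slot (the MOVBE load), and the
 * instr_dual entry then insists on a memory operand (mod != 3), since
 * the register form is N (not implemented).
 */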
4879 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4881 unsigned size;
4883 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4884 if (size == 8)
4885 size = 4;
4886 return size;
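/*
 * Even with REX.W most instructions carry at most a 4-byte immediate
 * that the CPU sign-extends to 64 bits (e.g. "add rax, imm32"), hence
 * the cap above; only the OpImm64 forms (movabs) fetch all 8 bytes.
 */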
4889 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4890 unsigned size, bool sign_extension)
4892 int rc = X86EMUL_CONTINUE;
4894 op->type = OP_IMM;
4895 op->bytes = size;
4896 op->addr.mem.ea = ctxt->_eip;
4897 /* NB. Immediates are sign-extended as necessary. */
4898 switch (op->bytes) {
4900 case 1: op->val = insn_fetch(s8, ctxt); break;
4903 case 2: op->val = insn_fetch(s16, ctxt); break;
4906 case 4: op->val = insn_fetch(s32, ctxt); break;
4909 case 8: op->val = insn_fetch(s64, ctxt); break;
4912 if (!sign_extension) {
4913 switch (op->bytes) {
4915 case 1: op->val &= 0xff; break;
4918 case 2: op->val &= 0xffff; break;
4921 case 4: op->val &= 0xffffffff; break;
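/*
 * E.g. an immediate byte 0x80 is sign-extended by insn_fetch(s8, ...)
 * to 0xffffffffffffff80; when sign_extension is false, the masking
 * above restores the raw zero-extended value 0x80.
 */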
4929 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4932 int rc = X86EMUL_CONTINUE;
4936 decode_register_operand(ctxt, op);
4939 rc = decode_imm(ctxt, op, 1, false);
4942 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4946 if (ctxt->d & BitOp)
4947 fetch_bit_operand(ctxt);
4948 op->orig_val = op->val;
4951 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4955 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4956 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4957 fetch_register_operand(op);
4958 op->orig_val = op->val;
4962 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4963 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4964 fetch_register_operand(op);
4965 op->orig_val = op->val;
4968 if (ctxt->d & ByteOp) {
4973 op->bytes = ctxt->op_bytes;
4974 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4975 fetch_register_operand(op);
4976 op->orig_val = op->val;
4980 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4981 op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RDI);
4983 op->addr.mem.seg = VCPU_SREG_ES;
4990 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4991 fetch_register_operand(op);
4996 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4999 rc = decode_imm(ctxt, op, 1, true);
5007 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5010 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5013 ctxt->memop.bytes = 1;
5014 if (ctxt->memop.type == OP_REG) {
5015 ctxt->memop.addr.reg = decode_register(ctxt,
5016 ctxt->modrm_rm, true);
5017 fetch_register_operand(&ctxt->memop);
5021 ctxt->memop.bytes = 2;
5024 ctxt->memop.bytes = 4;
5027 rc = decode_imm(ctxt, op, 2, false);
5030 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5034 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5035 op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RSI);
5037 op->addr.mem.seg = ctxt->seg_override;
5043 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5044 op->addr.mem.ea = address_mask(ctxt,
5046 reg_read(ctxt, VCPU_REGS_RBX) +
5047 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5048 op->addr.mem.seg = ctxt->seg_override;
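/*
 * OpXLat forms the XLAT source address rBX + AL: e.g. with
 * EBX = 0x1000 and AL = 5 the effective address is 0x1005 in the
 * (possibly overridden) data segment, truncated to the current
 * address size by address_mask().
 */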
5053 op->addr.mem.ea = ctxt->_eip;
5054 op->bytes = ctxt->op_bytes + 2;
5055 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5058 ctxt->memop.bytes = ctxt->op_bytes + 2;
5062 op->val = VCPU_SREG_ES;
5066 op->val = VCPU_SREG_CS;
5070 op->val = VCPU_SREG_SS;
5074 op->val = VCPU_SREG_DS;
5078 op->val = VCPU_SREG_FS;
5082 op->val = VCPU_SREG_GS;
5085 /* Special instructions do their own operand decoding. */
5087 op->type = OP_NONE; /* Disable writeback. */
5095 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5097 int rc = X86EMUL_CONTINUE;
5098 int mode = ctxt->mode;
5099 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5100 bool op_prefix = false;
5101 bool has_seg_override = false;
5102 struct opcode opcode;
5104 struct desc_struct desc;
5106 ctxt->memop.type = OP_NONE;
5107 ctxt->memopp = NULL;
5108 ctxt->_eip = ctxt->eip;
5109 ctxt->fetch.ptr = ctxt->fetch.data;
5110 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5111 ctxt->opcode_len = 1;
5112 if (insn_len > 0)
5113 memcpy(ctxt->fetch.data, insn, insn_len);
5114 else {
5115 rc = __do_insn_fetch_bytes(ctxt, 1);
5116 if (rc != X86EMUL_CONTINUE)
5117 goto done;
5118 }
5120 switch (mode) {
5121 case X86EMUL_MODE_REAL:
5122 case X86EMUL_MODE_VM86:
5123 def_op_bytes = def_ad_bytes = 2;
5124 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5125 if (desc.d)
5126 def_op_bytes = def_ad_bytes = 4;
5127 break;
5128 case X86EMUL_MODE_PROT16:
5129 def_op_bytes = def_ad_bytes = 2;
5130 break;
5131 case X86EMUL_MODE_PROT32:
5132 def_op_bytes = def_ad_bytes = 4;
5133 break;
5134 #ifdef CONFIG_X86_64
5135 case X86EMUL_MODE_PROT64:
5136 def_op_bytes = 4;
5137 def_ad_bytes = 8;
5138 break;
5139 #endif
5140 default:
5141 return EMULATION_FAILED;
5142 }
5144 ctxt->op_bytes = def_op_bytes;
5145 ctxt->ad_bytes = def_ad_bytes;
5147 /* Legacy prefixes. */
5148 for (;;) {
5149 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5150 case 0x66: /* operand-size override */
5151 op_prefix = true;
5152 /* switch between 2/4 bytes */
5153 ctxt->op_bytes = def_op_bytes ^ 6;
5154 break;
5155 case 0x67: /* address-size override */
5156 if (mode == X86EMUL_MODE_PROT64)
5157 /* switch between 4/8 bytes */
5158 ctxt->ad_bytes = def_ad_bytes ^ 12;
5159 else
5160 /* switch between 2/4 bytes */
5161 ctxt->ad_bytes = def_ad_bytes ^ 6;
5162 break;
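/*
 * The XOR above toggles between the two legal sizes: a default
 * operand size of 4 becomes 4 ^ 6 = 2 and vice versa, while 0x67
 * flips the address size between 4 ^ 12 = 8 and 8 ^ 12 = 4 in
 * 64-bit mode.
 */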
5163 case 0x26: /* ES override */
5164 case 0x2e: /* CS override */
5165 case 0x36: /* SS override */
5166 case 0x3e: /* DS override */
5167 has_seg_override = true;
5168 ctxt->seg_override = (ctxt->b >> 3) & 3;
5169 break;
5170 case 0x64: /* FS override */
5171 case 0x65: /* GS override */
5172 has_seg_override = true;
5173 ctxt->seg_override = ctxt->b & 7;
5174 break;
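/*
 * The bit twiddling above recovers the architectural segment numbers
 * from the prefix bytes themselves: (0x36 >> 3) & 3 = 2 (SS),
 * (0x3e >> 3) & 3 = 3 (DS), 0x64 & 7 = 4 (FS) and 0x65 & 7 = 5 (GS),
 * matching the VCPU_SREG_* encoding.
 */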
5175 case 0x40 ... 0x4f: /* REX */
5176 if (mode != X86EMUL_MODE_PROT64)
5177 goto done_prefixes;
5178 ctxt->rex_prefix = ctxt->b;
5179 continue;
5180 case 0xf0: /* LOCK */
5181 ctxt->lock_prefix = 1;
5182 break;
5183 case 0xf2: /* REPNE/REPNZ */
5184 case 0xf3: /* REP/REPE/REPZ */
5185 ctxt->rep_prefix = ctxt->b;
5186 break;
5187 default:
5188 goto done_prefixes;
5189 }
5191 /* Any legacy prefix after a REX prefix nullifies its effect. */
5193 ctxt->rex_prefix = 0;
5194 }
5196 done_prefixes:
5198 /* REX prefix. */
5199 if (ctxt->rex_prefix & 8)
5200 ctxt->op_bytes = 8; /* REX.W */
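/*
 * E.g. in 64-bit mode the stream 48 66 89 c8 reads REX.W first, but
 * the 0x66 that follows falls through to the rex_prefix reset above,
 * so it decodes as the 16-bit "mov ax, cx" rather than a 64-bit move.
 */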
5202 /* Opcode byte(s). */
5203 opcode = opcode_table[ctxt->b];
5204 /* Two-byte opcode? */
5205 if (ctxt->b == 0x0f) {
5206 ctxt->opcode_len = 2;
5207 ctxt->b = insn_fetch(u8, ctxt);
5208 opcode = twobyte_table[ctxt->b];
5210 /* 0F_38 opcode map */
5211 if (ctxt->b == 0x38) {
5212 ctxt->opcode_len = 3;
5213 ctxt->b = insn_fetch(u8, ctxt);
5214 opcode = opcode_map_0f_38[ctxt->b];
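/*
 * A single-byte opcode indexes opcode_table directly; "0f a2" lands
 * in twobyte_table[0xa2] (cpuid) and "0f 38 f0" continues into
 * opcode_map_0f_38[0xf0], with ctxt->opcode_len recording the depth.
 */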
5217 ctxt->d = opcode.flags;
5219 if (ctxt->d & ModRM)
5220 ctxt->modrm = insn_fetch(u8, ctxt);
5222 /* vex-prefix instructions are not implemented */
5223 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5224 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5225 ctxt->d = NotImpl;
5226 }
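/*
 * In 32-bit modes c4/c5 are the legacy LES/LDS opcodes; a VEX prefix
 * is only assumed when the would-be ModRM byte has mod == 3, which is
 * never a valid LES/LDS memory operand. In 64-bit mode c4/c5 are
 * always VEX, hence the unconditional mode check above.
 */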
5228 while (ctxt->d & GroupMask) {
5229 switch (ctxt->d & GroupMask) {
5230 case Group:
5231 goffset = (ctxt->modrm >> 3) & 7;
5232 opcode = opcode.u.group[goffset];
5233 break;
5234 case GroupDual:
5235 goffset = (ctxt->modrm >> 3) & 7;
5236 if ((ctxt->modrm >> 6) == 3)
5237 opcode = opcode.u.gdual->mod3[goffset];
5238 else
5239 opcode = opcode.u.gdual->mod012[goffset];
5240 break;
5241 case RMExt:
5242 goffset = ctxt->modrm & 7;
5243 opcode = opcode.u.group[goffset];
5244 break;
5245 case Prefix:
5246 if (ctxt->rep_prefix && op_prefix)
5247 return EMULATION_FAILED;
5248 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5249 switch (simd_prefix) {
5250 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5251 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5252 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5253 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5254 }
5255 break;
5256 case Escape:
5257 if (ctxt->modrm > 0xbf)
5258 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5259 else
5260 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5261 break;
5262 case InstrDual:
5263 if ((ctxt->modrm >> 6) == 3)
5264 opcode = opcode.u.idual->mod3;
5265 else
5266 opcode = opcode.u.idual->mod012;
5267 break;
5268 case ModeDual:
5269 if (ctxt->mode == X86EMUL_MODE_PROT64)
5270 opcode = opcode.u.mdual->mode64;
5271 else
5272 opcode = opcode.u.mdual->mode32;
5273 break;
5274 default:
5275 return EMULATION_FAILED;
5276 }
5278 ctxt->d &= ~(u64)GroupMask;
5279 ctxt->d |= opcode.flags;
5280 }
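/*
 * Worked example of the walk above: opcode f7 with ModRM 0xd8 starts
 * at G(0, group3); goffset = (0xd8 >> 3) & 7 = 3 picks the /3 entry
 * (NEG r/m), whose flags are then merged into ctxt->d.
 */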
5282 /* Unrecognised? */
5283 if (ctxt->d == 0)
5284 return EMULATION_FAILED;
5286 ctxt->execute = opcode.u.execute;
5288 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5289 return EMULATION_FAILED;
5291 if (unlikely(ctxt->d &
5292 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5293 No16))) {
5295 * These are copied unconditionally here, and checked unconditionally
5296 * in x86_emulate_insn.
5298 ctxt->check_perm = opcode.check_perm;
5299 ctxt->intercept = opcode.intercept;
5301 if (ctxt->d & NotImpl)
5302 return EMULATION_FAILED;
5304 if (mode == X86EMUL_MODE_PROT64) {
5305 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5306 ctxt->op_bytes = 8;
5307 else if (ctxt->d & NearBranch)
5308 ctxt->op_bytes = 8;
5309 }
5311 if (ctxt->d & Op3264) {
5312 if (mode == X86EMUL_MODE_PROT64)
5313 ctxt->op_bytes = 8;
5314 else
5315 ctxt->op_bytes = 4;
5316 }
5318 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5319 ctxt->op_bytes = 4;
5321 if (ctxt->d & Sse)
5322 ctxt->op_bytes = 16;
5323 else if (ctxt->d & Mmx)
5324 ctxt->op_bytes = 8;
5325 }
5327 /* ModRM and SIB bytes. */
5328 if (ctxt->d & ModRM) {
5329 rc = decode_modrm(ctxt, &ctxt->memop);
5330 if (!has_seg_override) {
5331 has_seg_override = true;
5332 ctxt->seg_override = ctxt->modrm_seg;
5333 }
5334 } else if (ctxt->d & MemAbs)
5335 rc = decode_abs(ctxt, &ctxt->memop);
5336 if (rc != X86EMUL_CONTINUE)
5337 goto done;
5339 if (!has_seg_override)
5340 ctxt->seg_override = VCPU_SREG_DS;
5342 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5345 * Decode and fetch the source operand: register, memory
5346 * or immediate.
5347 */
5348 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5349 if (rc != X86EMUL_CONTINUE)
5350 goto done;
5353 * Decode and fetch the second source operand: register, memory
5354 * or immediate.
5355 */
5356 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5357 if (rc != X86EMUL_CONTINUE)
5358 goto done;
5360 /* Decode and fetch the destination operand: register or memory. */
5361 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5363 if (ctxt->rip_relative && likely(ctxt->memopp))
5364 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5365 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5367 done:
5368 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5371 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5373 return ctxt->d & PageTable;
5376 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5378 /* The second termination condition only applies to REPE
5379 * and REPNE. If the repeat string operation prefix is
5380 * REPE/REPZ or REPNE/REPNZ, test the corresponding
5381 * termination condition:
5382 * - if REPE/REPZ and ZF = 0 then done
5383 * - if REPNE/REPNZ and ZF = 1 then done
5385 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5386 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5387 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5388 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5389 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5390 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5391 return true;
5393 return false;
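/*
 * E.g. "repe cmpsb" stops either when RCX reaches zero (the first
 * termination condition, tested in x86_emulate_insn()) or when a
 * mismatching byte clears ZF, which the test above catches.
 */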
5396 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5400 rc = asm_safe("fwait");
5402 if (unlikely(rc != X86EMUL_CONTINUE))
5403 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5405 return X86EMUL_CONTINUE;
5408 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5411 if (op->type == OP_MM)
5412 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5415 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5417 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5419 if (!(ctxt->d & ByteOp))
5420 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5422 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5423 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5424 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5425 : "c"(ctxt->src2.val));
5427 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5428 if (!fop) /* exception is returned in fop variable */
5429 return emulate_de(ctxt);
5430 return X86EMUL_CONTINUE;
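/*
 * The stubs for one fastop operation are laid out at FASTOP_SIZE
 * strides, one per operand size, so the pointer arithmetic above is
 * the whole dispatch: a 4-byte destination gives __ffs(4) == 2 and
 * selects the 32-bit variant. Guest flags are round-tripped through
 * pushf/popf, and a stub that faulted hands back a NULL fop, which
 * is reported as #DE.
 */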
5433 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5435 memset(&ctxt->rip_relative, 0,
5436 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5438 ctxt->io_read.pos = 0;
5439 ctxt->io_read.end = 0;
5440 ctxt->mem_read.end = 0;
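/*
 * The ranged memset above relies on the field layout of
 * x86_emulate_ctxt: it wipes everything declared between rip_relative
 * and modrm in one call, so new decode-state fields reset
 * automatically as long as they live inside that span.
 */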
5443 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5445 const struct x86_emulate_ops *ops = ctxt->ops;
5446 int rc = X86EMUL_CONTINUE;
5447 int saved_dst_type = ctxt->dst.type;
5448 unsigned emul_flags;
5450 ctxt->mem_read.pos = 0;
5452 /* LOCK prefix is allowed only with some instructions */
5453 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5454 rc = emulate_ud(ctxt);
5458 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5459 rc = emulate_ud(ctxt);
5463 emul_flags = ctxt->ops->get_hflags(ctxt);
5464 if (unlikely(ctxt->d &
5465 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5466 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5467 (ctxt->d & Undefined)) {
5468 rc = emulate_ud(ctxt);
5472 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5473 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5474 rc = emulate_ud(ctxt);
5478 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5479 rc = emulate_nm(ctxt);
5483 if (ctxt->d & Mmx) {
5484 rc = flush_pending_x87_faults(ctxt);
5485 if (rc != X86EMUL_CONTINUE)
5488 * Now that we know the fpu is exception safe, we can fetch
5489 * operands from it.
5490 */
5491 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5492 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5493 if (!(ctxt->d & Mov))
5494 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5497 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5498 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5499 X86_ICPT_PRE_EXCEPT);
5500 if (rc != X86EMUL_CONTINUE)
5504 /* Instruction can only be executed in protected mode */
5505 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5506 rc = emulate_ud(ctxt);
5510 /* Privileged instructions can be executed only at CPL 0 */
5511 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5512 if (ctxt->d & PrivUD)
5513 rc = emulate_ud(ctxt);
5515 rc = emulate_gp(ctxt, 0);
5519 /* Do instruction-specific permission checks */
5520 if (ctxt->d & CheckPerm) {
5521 rc = ctxt->check_perm(ctxt);
5522 if (rc != X86EMUL_CONTINUE)
5526 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5527 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5528 X86_ICPT_POST_EXCEPT);
5529 if (rc != X86EMUL_CONTINUE)
5533 if (ctxt->rep_prefix && (ctxt->d & String)) {
5534 /* All REP prefixes have the same first termination condition */
5535 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5536 string_registers_quirk(ctxt);
5537 ctxt->eip = ctxt->_eip;
5538 ctxt->eflags &= ~X86_EFLAGS_RF;
5544 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5545 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5546 ctxt->src.valptr, ctxt->src.bytes);
5547 if (rc != X86EMUL_CONTINUE)
5549 ctxt->src.orig_val64 = ctxt->src.val64;
5552 if (ctxt->src2.type == OP_MEM) {
5553 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5554 &ctxt->src2.val, ctxt->src2.bytes);
5555 if (rc != X86EMUL_CONTINUE)
5559 if ((ctxt->d & DstMask) == ImplicitOps)
5560 goto special_insn;
5563 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5564 /* optimisation - avoid slow emulated read if Mov */
5565 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5566 &ctxt->dst.val, ctxt->dst.bytes);
5567 if (rc != X86EMUL_CONTINUE) {
5568 if (!(ctxt->d & NoWrite) &&
5569 rc == X86EMUL_PROPAGATE_FAULT &&
5570 ctxt->exception.vector == PF_VECTOR)
5571 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5575 /* Copy full 64-bit value for CMPXCHG8B. */
5576 ctxt->dst.orig_val64 = ctxt->dst.val64;
5578 special_insn:
5580 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5581 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5582 X86_ICPT_POST_MEMACCESS);
5583 if (rc != X86EMUL_CONTINUE)
5587 if (ctxt->rep_prefix && (ctxt->d & String))
5588 ctxt->eflags |= X86_EFLAGS_RF;
5590 ctxt->eflags &= ~X86_EFLAGS_RF;
5592 if (ctxt->execute) {
5593 if (ctxt->d & Fastop) {
5594 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5595 rc = fastop(ctxt, fop);
5596 if (rc != X86EMUL_CONTINUE)
5600 rc = ctxt->execute(ctxt);
5601 if (rc != X86EMUL_CONTINUE)
5606 if (ctxt->opcode_len == 2)
5607 goto twobyte_insn;
5608 else if (ctxt->opcode_len == 3)
5609 goto threebyte_insn;
5612 case 0x70 ... 0x7f: /* jcc (short) */
5613 if (test_cc(ctxt->b, ctxt->eflags))
5614 rc = jmp_rel(ctxt, ctxt->src.val);
5616 case 0x8d: /* lea r16/r32, m */
5617 ctxt->dst.val = ctxt->src.addr.mem.ea;
5619 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5620 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5621 ctxt->dst.type = OP_NONE;
5625 case 0x98: /* cbw/cwde/cdqe */
5626 switch (ctxt->op_bytes) {
5627 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5628 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5629 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
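/*
 * The cast width tracks the operand size: e.g. with op_bytes == 4
 * this is cwde, so an AX value of 0x8000 sign-extends to an EAX
 * value of 0xffff8000.
 */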
5632 case 0xcc: /* int3 */
5633 rc = emulate_int(ctxt, 3);
5635 case 0xcd: /* int n */
5636 rc = emulate_int(ctxt, ctxt->src.val);
5638 case 0xce: /* into */
5639 if (ctxt->eflags & X86_EFLAGS_OF)
5640 rc = emulate_int(ctxt, 4);
5642 case 0xe9: /* jmp rel */
5643 case 0xeb: /* jmp rel short */
5644 rc = jmp_rel(ctxt, ctxt->src.val);
5645 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5647 case 0xf4: /* hlt */
5648 ctxt->ops->halt(ctxt);
5650 case 0xf5: /* cmc */
5651 /* complement carry flag from eflags reg */
5652 ctxt->eflags ^= X86_EFLAGS_CF;
5654 case 0xf8: /* clc */
5655 ctxt->eflags &= ~X86_EFLAGS_CF;
5657 case 0xf9: /* stc */
5658 ctxt->eflags |= X86_EFLAGS_CF;
5660 case 0xfc: /* cld */
5661 ctxt->eflags &= ~X86_EFLAGS_DF;
5663 case 0xfd: /* std */
5664 ctxt->eflags |= X86_EFLAGS_DF;
5667 goto cannot_emulate;
5670 if (rc != X86EMUL_CONTINUE)
5671 goto done;
5673 writeback:
5674 if (ctxt->d & SrcWrite) {
5675 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5676 rc = writeback(ctxt, &ctxt->src);
5677 if (rc != X86EMUL_CONTINUE)
5680 if (!(ctxt->d & NoWrite)) {
5681 rc = writeback(ctxt, &ctxt->dst);
5682 if (rc != X86EMUL_CONTINUE)
5687 * restore dst type in case the decoding will be reused
5688 * (happens for string instructions)
5690 ctxt->dst.type = saved_dst_type;
5692 if ((ctxt->d & SrcMask) == SrcSI)
5693 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5695 if ((ctxt->d & DstMask) == DstDI)
5696 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5698 if (ctxt->rep_prefix && (ctxt->d & String)) {
5700 struct read_cache *r = &ctxt->io_read;
5701 if ((ctxt->d & SrcMask) == SrcSI)
5702 count = ctxt->src.count;
5704 count = ctxt->dst.count;
5705 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5707 if (!string_insn_completed(ctxt)) {
5709 * Re-enter guest when pio read ahead buffer is empty
5710 * or, if it is not used, after every 1024 iterations.
5712 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5713 (r->end == 0 || r->end != r->pos)) {
5715 * Reset read cache. Usually happens before
5716 * decode, but since the instruction is restarted
5717 * we have to do it here.
5719 ctxt->mem_read.end = 0;
5720 writeback_registers(ctxt);
5721 return EMULATION_RESTART;
5723 goto done; /* skip rip writeback */
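/*
 * In other words: while the pio read-ahead buffer still holds data,
 * the instruction is restarted inside the emulator; once the buffer
 * is exhausted, or on every 1024th iteration when no buffer is in
 * use, control returns to the guest with rip still pointing at the
 * string instruction so that pending events can be delivered.
 */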
5725 ctxt->eflags &= ~X86_EFLAGS_RF;
5728 ctxt->eip = ctxt->_eip;
5730 done:
5731 if (rc == X86EMUL_PROPAGATE_FAULT) {
5732 WARN_ON(ctxt->exception.vector > 0x1f);
5733 ctxt->have_exception = true;
5735 if (rc == X86EMUL_INTERCEPTED)
5736 return EMULATION_INTERCEPTED;
5738 if (rc == X86EMUL_CONTINUE)
5739 writeback_registers(ctxt);
5741 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5743 twobyte_insn:
5744 switch (ctxt->b) {
5745 case 0x09: /* wbinvd */
5746 (ctxt->ops->wbinvd)(ctxt);
5748 case 0x08: /* invd */
5749 case 0x0d: /* GrpP (prefetch) */
5750 case 0x18: /* Grp16 (prefetch/nop) */
5751 case 0x1f: /* nop */
5753 case 0x20: /* mov cr, reg */
5754 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5756 case 0x21: /* mov from dr to reg */
5757 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5759 case 0x40 ... 0x4f: /* cmov */
5760 if (test_cc(ctxt->b, ctxt->eflags))
5761 ctxt->dst.val = ctxt->src.val;
5762 else if (ctxt->op_bytes != 4)
5763 ctxt->dst.type = OP_NONE; /* no writeback */
5765 case 0x80 ... 0x8f: /* jnz rel, etc. */
5766 if (test_cc(ctxt->b, ctxt->eflags))
5767 rc = jmp_rel(ctxt, ctxt->src.val);
5769 case 0x90 ... 0x9f: /* setcc r/m8 */
5770 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5772 case 0xb6 ... 0xb7: /* movzx */
5773 ctxt->dst.bytes = ctxt->op_bytes;
5774 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5775 : (u16) ctxt->src.val;
5777 case 0xbe ... 0xbf: /* movsx */
5778 ctxt->dst.bytes = ctxt->op_bytes;
5779 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5780 (s16) ctxt->src.val;
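/*
 * E.g. "movsx eax, bl" with bl == 0xfe takes the (s8) path and yields
 * 0xfffffffe, whereas the movzx case above would give 0x000000fe.
 */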
5783 goto cannot_emulate;
5786 threebyte_insn:
5788 if (rc != X86EMUL_CONTINUE)
5789 goto done;
5791 goto writeback;
5793 cannot_emulate:
5794 return EMULATION_FAILED;
5797 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5799 invalidate_registers(ctxt);
5802 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5804 writeback_registers(ctxt);
5807 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5809 if (ctxt->rep_prefix && (ctxt->d & String))
5810 return false;
5812 if (ctxt->d & TwoMemOp)
5813 return false;
5815 return true;