/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
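
/*
 * Sketch (not in the original source): each operand type occupies an
 * OpBits-wide slice of the 64-bit opcode flags word.  For example, an
 * entry declared DstMem | SrcReg stores OpMem in bits 1-5 and OpReg in
 * bits 6-10, and the decoder can recover them with a shift and a mask:
 *
 *	dst_type = (ctxt->d & DstMask) >> DstShift;	/- == OpMem -/
 *	src_type = (ctxt->d & SrcMask) >> SrcShift;	/- == OpReg -/
 */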

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define AlignMask ((u64)7 << 41)
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
#define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operands */

#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
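
/*
 * Usage sketch (an assumption, not quoted from the tables below): the X*
 * repetition macros let the opcode tables cover a run of opcodes with a
 * single entry, e.g. the sixteen conditional jumps 0x70-0x7f, which all
 * decode identically:
 *
 *	X16(D(SrcImmByte | NearBranch)),
 */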

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
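
/*
 * Dispatch sketch (an assumption; the real dispatcher lives elsewhere in
 * this file): because the size variants are emitted back to back at
 * FASTOP_SIZE alignment in b/w/l/q order, a caller can select one by
 * arithmetic instead of a jump table:
 *
 *	void (*fop)(struct fastop *) = em_add;
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 */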

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
if (!(ctxt->regs_valid & (1 << nr))) {
ctxt->regs_valid |= 1 << nr;
ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
return ctxt->_regs[nr];

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
ctxt->regs_valid |= 1 << nr;
ctxt->regs_dirty |= 1 << nr;
return &ctxt->_regs[nr];

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
return reg_write(ctxt, nr);

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
ctxt->regs_dirty = 0;
ctxt->regs_valid = 0;

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
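
/*
 * Illustrative helper (an assumption, not part of the original file):
 * fastop stubs compute flags in a scratch copy of EFLAGS; only the bits
 * in EFLAGS_MASK are folded back into the saved image afterwards.
 */
static inline ulong fold_arith_flags(ulong saved, ulong computed)
{
	return (saved & ~EFLAGS_MASK) | (computed & EFLAGS_MASK);
}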

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \

#define FOP_RET "ret \n\t"

#define FOP_START(op) \
extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \

FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \

#define FOP1E(op, dst) \
FOP_FUNC(#op "_" #dst) \
"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
ON64(FOP1E(op##q, rax)) \

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
ON64(FOP1E(op, rcx)) \

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
ON64(FOP1EEX(op, rcx)) \

#define FOP2E(op, dst, src) \
FOP_FUNC(#op "_" #dst "_" #src) \
#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
FOP2E(op##b, al, dl) \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
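
/*
 * Expansion sketch (an assumption): FASTOP2(add) defines em_add as four
 * FASTOP_SIZE-aligned stubs, one per operand size:
 *
 *	em_add:                 addb %dl, %al;   ret
 *	em_add+1*FASTOP_SIZE:   addw %dx, %ax;   ret
 *	em_add+2*FASTOP_SIZE:   addl %edx, %eax; ret
 *	em_add+3*FASTOP_SIZE:   addq %rdx, %rax; ret   (64-bit only)
 */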

/* 2 operand, word only */
#define FASTOP2W(op) \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
FOP2E(op##b, al, cl) \
FOP2E(op##w, ax, cl) \
FOP2E(op##l, eax, cl) \
ON64(FOP2E(op##q, rax, cl)) \

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
FOP2E(op##b, dl, al) \
FOP2E(op##w, dx, ax) \
FOP2E(op##l, edx, eax) \
ON64(FOP2E(op##q, rdx, rax)) \

#define FOP3E(op, dst, src, src2) \
FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
FOP3E(op##w, ax, dx, cl) \
FOP3E(op##l, eax, edx, cl) \
ON64(FOP3E(op##q, rax, rdx, cl)) \

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
".type " #op ", @function \n\t" \

asm(".pushsection .fixup, \"ax\"\n"
".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret\n"

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
asm volatile("1:" insn "\n" \
".pushsection .fixup, \"ax\"\n" \
"3: movl $1, %[_fault]\n" \
_ASM_EXTABLE(1b, 3b) \
: [_fault] "+qm"(_fault) inoutclob ); \
_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
struct x86_instruction_info info = {
.intercept = intercept,
.rep_prefix = ctxt->rep_prefix,
.modrm_mod = ctxt->modrm_mod,
.modrm_reg = ctxt->modrm_reg,
.modrm_rm = ctxt->modrm_rm,
.src_val = ctxt->src.val64,
.dst_val = ctxt->dst.val64,
.src_bytes = ctxt->src.bytes,
.dst_bytes = ctxt->dst.bytes,
.ad_bytes = ctxt->ad_bytes,
.next_rip = ctxt->eip,
return ctxt->ops->intercept(ctxt, &info, stage);

static void assign_masked(ulong *dest, ulong src, ulong mask)
*dest = (*dest & ~mask) | (src & mask);

static void assign_register(unsigned long *reg, u64 val, int bytes)
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
*(u8 *)reg = (u8)val;
*(u16 *)reg = (u16)val;
break; /* 64b: zero-extend */

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
return (1UL << (ctxt->ad_bytes << 3)) - 1;

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
struct desc_struct ss;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */

static int stack_size(struct x86_emulate_ctxt *ctxt)
return (__fls(stack_mask(ctxt)) + 1) >> 3;

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
if (ctxt->ad_bytes == sizeof(unsigned long))
return reg & ad_mask(ctxt);

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
return address_mask(ctxt, reg_read(ctxt, reg));

static void masked_increment(ulong *reg, ulong mask, int inc)
assign_masked(reg, *reg + inc, mask);

register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
ulong *preg = reg_rmw(ctxt, reg);
assign_register(preg, *preg + inc, ctxt->ad_bytes);

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);

static u32 desc_limit_scaled(struct desc_struct *desc)
u32 limit = get_desc_limit(desc);
return desc->g ? (limit << 12) | 0xfff : limit;
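
/*
 * Worked example (not in the original): a flat 4 GiB segment has limit
 * 0xfffff with desc->g set, so desc_limit_scaled() returns
 * (0xfffff << 12) | 0xfff == 0xffffffff.
 */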

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return ctxt->ops->get_cached_segment_base(ctxt, seg);

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
u32 error, bool valid)
ctxt->exception.vector = vec;
ctxt->exception.error_code = error;
ctxt->exception.error_code_valid = valid;
return X86EMUL_PROPAGATE_FAULT;

static int emulate_db(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, DB_VECTOR, 0, false);

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
return emulate_exception(ctxt, GP_VECTOR, err, true);

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
return emulate_exception(ctxt, SS_VECTOR, err, true);

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, UD_VECTOR, 0, false);

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
return emulate_exception(ctxt, TS_VECTOR, err, true);

static int emulate_de(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, DE_VECTOR, 0, false);

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, NM_VECTOR, 0, false);

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
u64 alignment = ctxt->d & AlignMask;
if (likely(size < 16))

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned *max_size, unsigned size,
bool write, bool fetch,
enum x86emul_mode mode, ulong *linear)
struct desc_struct desc;
la = seg_base(ctxt, addr.seg) + addr.ea;
case X86EMUL_MODE_PROT64:
va_bits = ctxt_virt_addr_bits(ctxt);
if (get_canonical(la, va_bits) != la)
*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
if (size > *max_size)
*linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
/* code segment in protected mode or read-only data segment */
if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
|| !(desc.type & 2)) && write)
/* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
lim = desc_limit_scaled(&desc);
if (!(desc.type & 8) && (desc.type & 4)) {
/* expand-down segment */
lim = desc.d ? 0xffffffff : 0xffff;
if (lim == 0xffffffff)
*max_size = (u64)lim + 1 - addr.ea;
if (size > *max_size)
if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
if (addr.seg == VCPU_SREG_SS)
return emulate_ss(ctxt, 0);
return emulate_gp(ctxt, 0);

static int linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write,
return __linearize(ctxt, addr, &max_size, size, write, false,

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
enum x86emul_mode mode)
struct segmented_address addr = { .seg = VCPU_SREG_CS,
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
return assign_eip(ctxt, dst, ctxt->mode);

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
enum x86emul_mode mode = ctxt->mode;
if (ctxt->mode >= X86EMUL_MODE_PROT16) {
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
mode = X86EMUL_MODE_PROT64;
mode = X86EMUL_MODE_PROT32; /* temporary value */
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
rc = assign_eip(ctxt, dst, mode);
if (rc == X86EMUL_CONTINUE)

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
return assign_eip_near(ctxt, ctxt->_eip + rel);

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
void *data, unsigned size)
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
ulong linear, void *data,
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);

/*
 * Prefetch the remaining bytes of the instruction without crossing a page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
unsigned size, max_size;
unsigned long linear;
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
/*
 * We do not know exactly how many bytes will be needed, and
 * __linearize is expensive, so fetch as much as possible. We
 * just have to avoid going beyond the 15 byte limit, the end
 * of the segment, or the end of the page.
 *
 * __linearize is called with size 0 so that it does not do any
 * boundary check itself. Instead, we use max_size to check
 * against op_size.
 */
rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
if (unlikely(rc != X86EMUL_CONTINUE))
size = min_t(unsigned, 15UL ^ cur_size, max_size);
size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
/*
 * One instruction can only straddle two pages, and one page has
 * already been loaded at the beginning of x86_decode_insn. So, if we
 * still do not have enough bytes, we must have hit the 15-byte limit.
 */
if (unlikely(size < op_size))
return emulate_gp(ctxt, 0);
rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
ctxt->fetch.end += size;
return X86EMUL_CONTINUE;

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
if (unlikely(done_size < size))
return __do_insn_fetch_bytes(ctxt, size - done_size);
return X86EMUL_CONTINUE;

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
if (rc != X86EMUL_CONTINUE) \
ctxt->_eip += sizeof(_type); \
memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
ctxt->fetch.ptr += sizeof(_type); \

#define insn_fetch_arr(_arr, _size, _ctxt) \
rc = do_insn_fetch_bytes(_ctxt, _size); \
if (rc != X86EMUL_CONTINUE) \
ctxt->_eip += (_size); \
memcpy(_arr, ctxt->fetch.ptr, _size); \
ctxt->fetch.ptr += (_size); \
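
/*
 * Usage sketch (an assumption): decode code pulls immediates straight out
 * of the fetch cache, with the type selecting size and signedness, e.g.
 *
 *	op->val = insn_fetch(s8, ctxt);		sign-extended imm8
 *	op->val = insn_fetch(u16, ctxt);	zero-extended imm16
 */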

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
p = reg_rmw(ctxt, modrm_reg);
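/*
 * Example (a sketch, not from the original): for "mov %ah, %dl" with no
 * REX prefix, modrm_reg is 4, so the high-byte branch above returns byte 1
 * of the RAX image: modrm_reg & 3 selects RAX, and the +1 selects AH.
 */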

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
u16 *size, unsigned long *address, int op_bytes)
rc = segmented_read_std(ctxt, addr, size, 2);
if (rc != X86EMUL_CONTINUE)
rc = segmented_read_std(ctxt, addr, address, op_bytes);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
/* If src is zero, do not write back, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsf);

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
/* If src is zero, do not write back, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsr);

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; " CALL_NOSPEC
: "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));

static void fetch_register_operand(struct operand *op)
switch (op->bytes) {
op->val = *(u8 *)op->addr.reg;
op->val = *(u16 *)op->addr.reg;
op->val = *(u32 *)op->addr.reg;
op->val = *(u64 *)op->addr.reg;

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;

static int em_fninit(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
asm volatile("fninit");
return X86EMUL_CONTINUE;

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
asm volatile("fnstcw %0": "+m"(fcw));
ctxt->dst.val = fcw;
return X86EMUL_CONTINUE;

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
asm volatile("fnstsw %0": "+m"(fsw));
ctxt->dst.val = fsw;
return X86EMUL_CONTINUE;

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
unsigned reg = ctxt->modrm_reg;
if (!(ctxt->d & ModRM))
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
read_sse_reg(ctxt, &op->vec_val, reg);
if (ctxt->d & Mmx) {
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
fetch_register_operand(op);
op->orig_val = op->val;

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
ctxt->modrm_seg = VCPU_SREG_SS;

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
int index_reg, base_reg, scale;
int rc = X86EMUL_CONTINUE;
ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
if (ctxt->d & Sse) {
op->addr.xmm = ctxt->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
if (ctxt->d & Mmx) {
op->addr.mm = ctxt->modrm_rm & 7;
fetch_register_operand(op);
if (ctxt->ad_bytes == 2) {
unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
/* 16-bit ModR/M decode. */
switch (ctxt->modrm_mod) {
if (ctxt->modrm_rm == 6)
modrm_ea += insn_fetch(u16, ctxt);
modrm_ea += insn_fetch(s8, ctxt);
modrm_ea += insn_fetch(u16, ctxt);
switch (ctxt->modrm_rm) {
modrm_ea += bx + si;
modrm_ea += bx + di;
modrm_ea += bp + si;
modrm_ea += bp + di;
if (ctxt->modrm_mod != 0)
if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
(ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
ctxt->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
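/*
 * Worked example (not in the original): for the bytes 8b 42 10, i.e.
 * "mov 0x10(%bp,%si),%ax": mod=01 and rm=010 select BP+SI with a disp8
 * of 0x10, and because BP participates the default segment becomes SS.
 */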
/* 32/64-bit ModR/M decode. */
if ((ctxt->modrm_rm & 7) == 4) {
sib = insn_fetch(u8, ctxt);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
modrm_ea += insn_fetch(s32, ctxt);
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
/* Increment ESP on POP [ESP] */
if ((ctxt->d & IncSP) &&
base_reg == VCPU_REGS_RSP)
modrm_ea += ctxt->op_bytes;
modrm_ea += reg_read(ctxt, index_reg) << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
modrm_ea += insn_fetch(s32, ctxt);
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
base_reg = ctxt->modrm_rm;
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
switch (ctxt->modrm_mod) {
modrm_ea += insn_fetch(s8, ctxt);
modrm_ea += insn_fetch(s32, ctxt);
op->addr.mem.ea = modrm_ea;
if (ctxt->ad_bytes != 8)
ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

static int decode_abs(struct x86_emulate_ctxt *ctxt,
int rc = X86EMUL_CONTINUE;
switch (ctxt->ad_bytes) {
op->addr.mem.ea = insn_fetch(u16, ctxt);
op->addr.mem.ea = insn_fetch(u32, ctxt);
op->addr.mem.ea = insn_fetch(u64, ctxt);

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
mask = ~((long)ctxt->dst.bytes * 8 - 1);
if (ctxt->src.bytes == 2)
sv = (s16)ctxt->src.val & (s16)mask;
else if (ctxt->src.bytes == 4)
sv = (s32)ctxt->src.val & (s32)mask;
sv = (s64)ctxt->src.val & (s64)mask;
ctxt->dst.addr.mem.ea = address_mask(ctxt,
ctxt->dst.addr.mem.ea + (sv >> 3));
/* only subword offset */
ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
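
/*
 * Worked example (not in the original): "bts %eax, (mem)" with EAX=100
 * and a 4-byte operand: mask = ~31, so sv = 96 and the effective address
 * above advances by 96/8 = 12 bytes, while the in-operand bit offset
 * becomes 100 & 31 = 4.
 */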

static int read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *dest, unsigned size)
struct read_cache *mc = &ctxt->mem_read;
if (mc->pos < mc->end)
WARN_ON((mc->end + size) >= sizeof(mc->data));
rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
if (rc != X86EMUL_CONTINUE)
memcpy(dest, mc->data + mc->pos, size);
return X86EMUL_CONTINUE;

static int segmented_read(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return read_emulated(ctxt, linear, data, size);

static int segmented_write(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return ctxt->ops->write_emulated(ctxt, linear, data, size,

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *orig_data, const void *data,
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
size, &ctxt->exception);

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int size, unsigned short port,
struct read_cache *rc = &ctxt->io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
rc->pos = rc->end = 0;
if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
if (ctxt->rep_prefix && (ctxt->d & String) &&
!(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
memcpy(dest, rc->data + rc->pos, size);

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
u16 index, struct desc_struct *desc)
ctxt->ops->get_idt(ctxt, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, index << 3 | 0x2);
addr = dt.address + index * 8;
return linear_read_system(ctxt, addr, desc, sizeof(*desc));

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_ptr *dt)
const struct x86_emulate_ops *ops = ctxt->ops;
if (selector & 1 << 2) {
struct desc_struct desc;
memset(dt, 0, sizeof(*dt));
if (!ops->get_segment(ctxt, &sel, &desc, &base3,
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
ops->get_gdt(ctxt, dt);
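
/*
 * Example (a sketch): selector 0x2b is index 5, TI=0, RPL=3, so the
 * descriptor comes from the GDT; a selector with bit 2 set (TI=1),
 * e.g. 0x004f, is looked up in the LDT via the branch above.
 */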

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, ulong *desc_addr_p)
u16 index = selector >> 3;
get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
#ifdef CONFIG_X86_64
if (addr >> 32 != 0) {
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_LMA))
*desc_addr_p = addr;
return X86EMUL_CONTINUE;

/* allowed just for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc,
rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
if (rc != X86EMUL_CONTINUE)
return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));

/* allowed just for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc)
rc = get_descriptor_ptr(ctxt, selector, &addr);
if (rc != X86EMUL_CONTINUE)
return linear_write_system(ctxt, addr, desc, sizeof(*desc));

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
struct desc_struct *desc)
struct desc_struct seg_desc, old_desc;
unsigned err_vec = GP_VECTOR;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
memset(&seg_desc, 0, sizeof(seg_desc));
if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
 * unreal mode) */
ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
/* VM86 needs a clean new segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
/* NULL selector is not valid for TR, CS and (except for long mode) SS */
if (null_selector) {
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
if (seg == VCPU_SREG_SS) {
if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
/*
 * ctxt->ops->set_segment expects the CPL to be in
 * SS.DPL, so fake an expand-up 32-bit data segment.
 */
/* Skip all following checks */
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
err_code = selector & 0xfffc;
err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
/* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s) {
if (transfer == X86_TRANSFER_CALL_JMP)
return X86EMUL_UNHANDLEABLE;
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
/*
 * segment is not a writable data segment, or segment
 * selector's RPL != CPL, or DPL != CPL
 */
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1689 if (!(seg_desc.type & 8))
1692 if (seg_desc.type & 4) {
1698 if (rpl > cpl || dpl != cpl)
1701 /* in long-mode d/b must be clear if l is set */
1702 if (seg_desc.d && seg_desc.l) {
1705 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1706 if (efer & EFER_LMA)
1710 /* CS(RPL) <- CPL */
1711 selector = (selector & 0xfffc) | cpl;
1714 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1716 old_desc = seg_desc;
1717 seg_desc.type |= 2; /* busy */
1718 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1719 sizeof(seg_desc), &ctxt->exception);
1720 if (ret != X86EMUL_CONTINUE)
1723 case VCPU_SREG_LDTR:
1724 if (seg_desc.s || seg_desc.type != 2)
1727 default: /* DS, ES, FS, or GS */
1729 * segment is not a data or readable code segment or
1730 * ((segment is a data or nonconforming code segment)
1731 * and (both RPL and CPL > DPL))
1733 if ((seg_desc.type & 0xa) == 0x8 ||
1734 (((seg_desc.type & 0xc) != 0xc) &&
1735 (rpl > dpl && cpl > dpl)))
1741 /* mark segment as accessed */
1742 if (!(seg_desc.type & 1)) {
1744 ret = write_segment_descriptor(ctxt, selector,
1746 if (ret != X86EMUL_CONTINUE)
1749 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1750 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1751 if (ret != X86EMUL_CONTINUE)
1753 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1754 ((u64)base3 << 32), ctxt))
1755 return emulate_gp(ctxt, 0);
1758 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1761 return X86EMUL_CONTINUE;
1763 return emulate_exception(ctxt, err_vec, err_code, true);

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
u8 cpl = ctxt->ops->cpl(ctxt);
/*
 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
 * they can load it at CPL<3 (Intel's manual says only LSS can).
 *
 * However, the Intel manual says that putting IST=1/DPL=3 in
 * an interrupt gate will result in SS=3 (the AMD manual instead
 * says it doesn't), so allow SS=3 in __load_segment_descriptor
 * and only forbid it here.
 */
if (seg == VCPU_SREG_SS && selector == 3 &&
ctxt->mode == X86EMUL_MODE_PROT64)
return emulate_exception(ctxt, GP_VECTOR, 0, true);
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);

static void write_register_operand(struct operand *op)
return assign_register(op->addr.reg, op->val, op->bytes);

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
write_register_operand(op);
if (ctxt->lock_prefix)
return segmented_cmpxchg(ctxt,
return segmented_write(ctxt,
return segmented_write(ctxt,
op->bytes * op->count);
write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
return X86EMUL_CONTINUE;

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
struct segmented_address addr;
rsp_increment(ctxt, -bytes);
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);

static int em_push(struct x86_emulate_ctxt *ctxt)
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return push(ctxt, &ctxt->src.val, ctxt->op_bytes);

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
struct segmented_address addr;
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
rsp_increment(ctxt, len);

static int em_pop(struct x86_emulate_ctxt *ctxt)
return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
X86_EFLAGS_AC | X86_EFLAGS_ID;
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
change_mask |= X86_EFLAGS_IOPL;
change_mask |= X86_EFLAGS_IF;
case X86EMUL_MODE_VM86:
return emulate_gp(ctxt, 0);
change_mask |= X86_EFLAGS_IF;
default: /* real mode */
change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
*(unsigned long *)dest =
(ctxt->eflags & ~change_mask) | (val & change_mask);

static int em_popf(struct x86_emulate_ctxt *ctxt)
ctxt->dst.type = OP_REG;
ctxt->dst.addr.reg = &ctxt->eflags;
ctxt->dst.bytes = ctxt->op_bytes;
return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);

static int em_enter(struct x86_emulate_ctxt *ctxt)
unsigned frame_size = ctxt->src.val;
unsigned nesting_level = ctxt->src2.val & 31;
return X86EMUL_UNHANDLEABLE;
rbp = reg_read(ctxt, VCPU_REGS_RBP);
rc = push(ctxt, &rbp, stack_size(ctxt));
if (rc != X86EMUL_CONTINUE)
assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
return X86EMUL_CONTINUE;

static int em_leave(struct x86_emulate_ctxt *ctxt)
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
int seg = ctxt->src2.val;
ctxt->src.val = get_segment_selector(ctxt, seg);
if (ctxt->op_bytes == 4) {
rsp_increment(ctxt, -2);
return em_push(ctxt);

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
int seg = ctxt->src2.val;
unsigned long selector;
rc = emulate_pop(ctxt, &selector, 2);
if (rc != X86EMUL_CONTINUE)
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);
rc = load_segment_descriptor(ctxt, (u16)selector, seg);

static int em_pusha(struct x86_emulate_ctxt *ctxt)
unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
(reg == VCPU_REGS_RSP) ?
(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
if (rc != X86EMUL_CONTINUE)

static int em_pushf(struct x86_emulate_ctxt *ctxt)
ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
return em_push(ctxt);

static int em_popa(struct x86_emulate_ctxt *ctxt)
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
rsp_increment(ctxt, ctxt->op_bytes);
rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
const struct x86_emulate_ops *ops = ctxt->ops;
/* TODO: Add limit checks */
ctxt->src.val = ctxt->eflags;
if (rc != X86EMUL_CONTINUE)
ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
ctxt->src.val = ctxt->_eip;
if (rc != X86EMUL_CONTINUE)
ops->get_idt(ctxt, &dt);
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;
rc = linear_read_system(ctxt, cs_addr, &cs, 2);
if (rc != X86EMUL_CONTINUE)
rc = linear_read_system(ctxt, eip_addr, &eip, 2);
if (rc != X86EMUL_CONTINUE)
rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
invalidate_registers(ctxt);
rc = __emulate_int_real(ctxt, irq);
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return __emulate_int_real(ctxt, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
/* Protected mode interrupts are not implemented yet */
return X86EMUL_UNHANDLEABLE;

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
X86_EFLAGS_AC | X86_EFLAGS_ID |
unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
/* TODO: Add stack limit check */
rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
ctxt->_eip = temp_eip;
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
ctxt->eflags |= X86_EFLAGS_FIXED;
ctxt->ops->set_nmi_mask(ctxt, false);

static int em_iret(struct x86_emulate_ctxt *ctxt)
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return emulate_iret_real(ctxt);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
/* iret from protected mode is not implemented yet */
return X86EMUL_UNHANDLEABLE;

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
X86_TRANSFER_CALL_JMP,
if (rc != X86EMUL_CONTINUE)
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
return assign_eip_near(ctxt, ctxt->src.val);

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
old_eip = ctxt->_eip;
rc = assign_eip_near(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
ctxt->src.val = old_eip;

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
u64 old = ctxt->dst.orig_val64;
if (ctxt->dst.bytes == 16)
return X86EMUL_UNHANDLEABLE;
if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
ctxt->eflags &= ~X86_EFLAGS_ZF;
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
ctxt->eflags |= X86_EFLAGS_ZF;
return X86EMUL_CONTINUE;

static int em_ret(struct x86_emulate_ctxt *ctxt)
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return assign_eip_near(ctxt, eip);

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
unsigned long eip, cs;
int cpl = ctxt->ops->cpl(ctxt);
struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
if (rc != X86EMUL_CONTINUE)
rc = assign_eip_far(ctxt, eip, &new_desc);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
rc = em_ret_far(ctxt);
if (rc != X86EMUL_CONTINUE)
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
/* Save real source value, then compare EAX against destination. */
ctxt->dst.orig_val = ctxt->dst.val;
ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
ctxt->src.orig_val = ctxt->src.val;
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
if (ctxt->eflags & X86_EFLAGS_ZF) {
/* Success: write back to memory; no update of EAX */
ctxt->src.type = OP_NONE;
ctxt->dst.val = ctxt->src.orig_val;
/* Failure: write the value we saw to EAX. */
ctxt->src.type = OP_REG;
ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
ctxt->src.val = ctxt->dst.orig_val;
/* Create write-cycle to dest by writing the same value */
ctxt->dst.val = ctxt->dst.orig_val;
return X86EMUL_CONTINUE;

static int em_lseg(struct x86_emulate_ctxt *ctxt)
int seg = ctxt->src2.val;
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = load_segment_descriptor(ctxt, sel, seg);
if (rc != X86EMUL_CONTINUE)
ctxt->dst.val = ctxt->src.val;

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
u32 eax, ebx, ecx, edx;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
return edx & bit(X86_FEATURE_LM);

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
desc->g = (flags >> 23) & 1;
desc->d = (flags >> 22) & 1;
desc->l = (flags >> 21) & 1;
desc->avl = (flags >> 20) & 1;
desc->p = (flags >> 15) & 1;
desc->dpl = (flags >> 13) & 3;
desc->s = (flags >> 12) & 1;
desc->type = (flags >> 8) & 15;
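
/*
 * Worked example (not in the original): flags 0x00c09b00 unpack to
 * g=1, d=1, l=0, avl=0, p=1, dpl=0, s=1, type=0xb, i.e. a present,
 * accessed 32-bit code segment with 4K granularity.
 */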

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
struct desc_struct desc;
selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
offset = 0x7f84 + n * 12;
offset = 0x7f2c + (n - 3) * 12;
set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
return X86EMUL_CONTINUE;

static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
struct desc_struct desc;
offset = 0x7e00 + n * 16;
selector = GET_SMSTATE(u16, smstate, offset);
rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
base3 = GET_SMSTATE(u32, smstate, offset + 12);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
return X86EMUL_CONTINUE;

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr3, u64 cr4)
/* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
if (cr4 & X86_CR4_PCIDE) {
bad = ctxt->ops->set_cr(ctxt, 3, cr3);
return X86EMUL_UNHANDLEABLE;
/*
 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
 * Then enable protected mode. However, PCID cannot be enabled
 * if EFER.LMA=0, so set it separately.
 */
bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
return X86EMUL_UNHANDLEABLE;
bad = ctxt->ops->set_cr(ctxt, 0, cr0);
return X86EMUL_UNHANDLEABLE;
if (cr4 & X86_CR4_PCIDE) {
bad = ctxt->ops->set_cr(ctxt, 4, cr4);
return X86EMUL_UNHANDLEABLE;
bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
return X86EMUL_UNHANDLEABLE;
return X86EMUL_CONTINUE;

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
const char *smstate)
struct desc_struct desc;
u32 val, cr0, cr3, cr4;
cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
for (i = 0; i < 8; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smstate, 0x7fcc);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smstate, 0x7fc8);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
selector = GET_SMSTATE(u32, smstate, 0x7fc0);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
ctxt->ops->set_gdt(ctxt, &dt);
dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
ctxt->ops->set_idt(ctxt, &dt);
for (i = 0; i < 6; i++) {
int r = rsm_load_seg_32(ctxt, smstate, i);
if (r != X86EMUL_CONTINUE)
cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);

static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
const char *smstate)
struct desc_struct desc;
u64 val, cr0, cr3, cr4;
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smstate, 0x7f68);
ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
val = GET_SMSTATE(u32, smstate, 0x7f60);
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
val = GET_SMSTATE(u64, smstate, 0x7ed0);
ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
selector = GET_SMSTATE(u32, smstate, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
ctxt->ops->set_idt(ctxt, &dt);
selector = GET_SMSTATE(u32, smstate, 0x7e70);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
if (r != X86EMUL_CONTINUE)
for (i = 0; i < 6; i++) {
r = rsm_load_seg_64(ctxt, smstate, i);
if (r != X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;

static int em_rsm(struct x86_emulate_ctxt *ctxt)
unsigned long cr0, cr4, efer;
if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
smbase = ctxt->ops->get_smbase(ctxt);
2570 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2571 if (ret != X86EMUL_CONTINUE)
2572 return X86EMUL_UNHANDLEABLE;
2575 * Get back to real mode, to prepare a safe state in which to load
2576 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2577 * supports long mode.
2579 cr4 = ctxt->ops->get_cr(ctxt, 4);
2580 if (emulator_has_longmode(ctxt)) {
2581 struct desc_struct cs_desc;
2583 /* Zero CR4.PCIDE before CR0.PG. */
2584 if (cr4 & X86_CR4_PCIDE) {
2585 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2586 cr4 &= ~X86_CR4_PCIDE;
2589 /* A 32-bit code segment is required to clear EFER.LMA. */
2590 memset(&cs_desc, 0, sizeof(cs_desc));
2592 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2593 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2596 /* For the 64-bit case, this will clear EFER.LMA. */
2597 cr0 = ctxt->ops->get_cr(ctxt, 0);
2598 if (cr0 & X86_CR0_PE)
2599 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2601 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
2602 if (cr4 & X86_CR4_PAE)
2603 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2605 /* And finally go back to 32-bit mode. */
2607 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2610 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2611 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2612 * state-save area.
2614 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2615 return X86EMUL_UNHANDLEABLE;
2617 if (emulator_has_longmode(ctxt))
2618 ret = rsm_load_state_64(ctxt, buf);
2620 ret = rsm_load_state_32(ctxt, buf);
2622 if (ret != X86EMUL_CONTINUE) {
2623 /* FIXME: should triple fault */
2624 return X86EMUL_UNHANDLEABLE;
2627 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2628 ctxt->ops->set_nmi_mask(ctxt, false);
2630 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2631 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2633 ctxt->ops->post_leave_smm(ctxt);
2635 return X86EMUL_CONTINUE;
2639 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2640 struct desc_struct *cs, struct desc_struct *ss)
2642 cs->l = 0; /* will be adjusted later */
2643 set_desc_base(cs, 0); /* flat segment */
2644 cs->g = 1; /* 4kb granularity */
2645 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2646 cs->type = 0x0b; /* Read, Execute, Accessed */
2648 cs->dpl = 0; /* will be adjusted later */
2653 set_desc_base(ss, 0); /* flat segment */
2654 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2655 ss->g = 1; /* 4kb granularity */
2657 ss->type = 0x03; /* Read/Write, Accessed */
2658 ss->d = 1; /* 32-bit stack segment */
2665 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2667 u32 eax, ebx, ecx, edx;
2670 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2671 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2672 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2673 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
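/*
 * Illustrative note (not in the original source): CPUID leaf 0 packs the
 * vendor string little-endian into EBX, EDX, ECX, so "GenuineIntel" is
 * ebx == 0x756e6547 ("Genu"), edx == 0x49656e69 ("ineI") and
 * ecx == 0x6c65746e ("ntel"); the X86EMUL_CPUID_VENDOR_GenuineIntel_*
 * constants compared above encode exactly these words.
 */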
2676 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2678 const struct x86_emulate_ops *ops = ctxt->ops;
2679 u32 eax, ebx, ecx, edx;
2682 * SYSCALL is always enabled in long mode, so the check only needs
2683 * to become vendor-specific (via CPUID) when other modes are active...
2685 if (ctxt->mode == X86EMUL_MODE_PROT64)
2690 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2692 * Intel ("GenuineIntel")
2693 * remark: Intel CPUs only support "syscall" in 64-bit
2694 * long mode. A 64-bit guest running a 32-bit compat
2695 * application will therefore #UD. While this behaviour
2696 * could be fixed by emulating the AMD response, AMD
2697 * CPUs cannot be made to behave like Intel.
2699 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2700 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2701 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2704 /* AMD ("AuthenticAMD") */
2705 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2706 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2707 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2710 /* AMD ("AMDisbetter!") */
2711 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2712 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2713 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2716 /* Hygon ("HygonGenuine") */
2717 if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
2718 ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
2719 edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
2723 * default: (not Intel, not AMD, not Hygon), apply Intel's
2724 * stricter rules...
2729 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2731 const struct x86_emulate_ops *ops = ctxt->ops;
2732 struct desc_struct cs, ss;
2737 /* syscall is not available in real mode */
2738 if (ctxt->mode == X86EMUL_MODE_REAL ||
2739 ctxt->mode == X86EMUL_MODE_VM86)
2740 return emulate_ud(ctxt);
2742 if (!(em_syscall_is_enabled(ctxt)))
2743 return emulate_ud(ctxt);
2745 ops->get_msr(ctxt, MSR_EFER, &efer);
2746 setup_syscalls_segments(ctxt, &cs, &ss);
2748 if (!(efer & EFER_SCE))
2749 return emulate_ud(ctxt);
2751 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2753 cs_sel = (u16)(msr_data & 0xfffc);
2754 ss_sel = (u16)(msr_data + 8);
2756 if (efer & EFER_LMA) {
2760 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2761 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2763 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2764 if (efer & EFER_LMA) {
2765 #ifdef CONFIG_X86_64
2766 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2769 ctxt->mode == X86EMUL_MODE_PROT64 ?
2770 MSR_LSTAR : MSR_CSTAR, &msr_data);
2771 ctxt->_eip = msr_data;
2773 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2774 ctxt->eflags &= ~msr_data;
2775 ctxt->eflags |= X86_EFLAGS_FIXED;
2779 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2780 ctxt->_eip = (u32)msr_data;
2782 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2785 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2786 return X86EMUL_CONTINUE;
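/*
 * Illustrative note (not in the original source): architecturally, SYSCALL
 * takes its CS selector from IA32_STAR[47:32] with the RPL bits forced to
 * zero (the "& 0xfffc" above) and uses CS + 8 as SS. In long mode the
 * return RIP is stashed in RCX and RFLAGS in R11, and RFLAGS is then
 * masked with IA32_FMASK (MSR_SYSCALL_MASK), which is the sequence
 * implemented above.
 */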
2789 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2791 const struct x86_emulate_ops *ops = ctxt->ops;
2792 struct desc_struct cs, ss;
2797 ops->get_msr(ctxt, MSR_EFER, &efer);
2798 /* inject #GP if in real mode */
2799 if (ctxt->mode == X86EMUL_MODE_REAL)
2800 return emulate_gp(ctxt, 0);
2803 * Not recognized on AMD in compat mode (but is recognized in legacy
2804 * mode, of course).
2806 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2807 && !vendor_intel(ctxt))
2808 return emulate_ud(ctxt);
2810 /* sysenter/sysexit have not been tested in 64-bit mode. */
2811 if (ctxt->mode == X86EMUL_MODE_PROT64)
2812 return X86EMUL_UNHANDLEABLE;
2814 setup_syscalls_segments(ctxt, &cs, &ss);
2816 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2817 if ((msr_data & 0xfffc) == 0x0)
2818 return emulate_gp(ctxt, 0);
2820 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2821 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2822 ss_sel = cs_sel + 8;
2823 if (efer & EFER_LMA) {
2828 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2829 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2831 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2832 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2834 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2835 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2838 return X86EMUL_CONTINUE;
2841 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2843 const struct x86_emulate_ops *ops = ctxt->ops;
2844 struct desc_struct cs, ss;
2845 u64 msr_data, rcx, rdx;
2847 u16 cs_sel = 0, ss_sel = 0;
2849 /* inject #GP if in real mode or Virtual 8086 mode */
2850 if (ctxt->mode == X86EMUL_MODE_REAL ||
2851 ctxt->mode == X86EMUL_MODE_VM86)
2852 return emulate_gp(ctxt, 0);
2854 setup_syscalls_segments(ctxt, &cs, &ss);
2856 if ((ctxt->rex_prefix & 0x8) != 0x0)
2857 usermode = X86EMUL_MODE_PROT64;
2859 usermode = X86EMUL_MODE_PROT32;
2861 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2862 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2866 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2868 case X86EMUL_MODE_PROT32:
2869 cs_sel = (u16)(msr_data + 16);
2870 if ((msr_data & 0xfffc) == 0x0)
2871 return emulate_gp(ctxt, 0);
2872 ss_sel = (u16)(msr_data + 24);
2876 case X86EMUL_MODE_PROT64:
2877 cs_sel = (u16)(msr_data + 32);
2878 if (msr_data == 0x0)
2879 return emulate_gp(ctxt, 0);
2880 ss_sel = cs_sel + 8;
2883 if (emul_is_noncanonical_address(rcx, ctxt) ||
2884 emul_is_noncanonical_address(rdx, ctxt))
2885 return emulate_gp(ctxt, 0);
2888 cs_sel |= SEGMENT_RPL_MASK;
2889 ss_sel |= SEGMENT_RPL_MASK;
2891 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2892 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2895 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2897 return X86EMUL_CONTINUE;
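/*
 * Illustrative note (not in the original source): SYSEXIT derives both user
 * selectors from IA32_SYSENTER_CS alone: CS = MSR + 16 and SS = MSR + 24
 * for a 32-bit return, CS = MSR + 32 and SS = MSR + 40 (written above as
 * cs_sel + 8) for a 64-bit return, with the RPL forced to 3 before the
 * segments are loaded.
 */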
2900 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2903 if (ctxt->mode == X86EMUL_MODE_REAL)
2905 if (ctxt->mode == X86EMUL_MODE_VM86)
2907 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2908 return ctxt->ops->cpl(ctxt) > iopl;
2911 #define VMWARE_PORT_VMPORT (0x5658)
2912 #define VMWARE_PORT_VMRPC (0x5659)
2914 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2917 const struct x86_emulate_ops *ops = ctxt->ops;
2918 struct desc_struct tr_seg;
2921 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2922 unsigned mask = (1 << len) - 1;
2926 * VMware allows access to these ports even if denied
2927 * by TSS I/O permission bitmap. Mimic behavior.
2929 if (enable_vmware_backdoor &&
2930 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2933 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2936 if (desc_limit_scaled(&tr_seg) < 103)
2938 base = get_desc_base(&tr_seg);
2939 #ifdef CONFIG_X86_64
2940 base |= ((u64)base3) << 32;
2942 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2943 if (r != X86EMUL_CONTINUE)
2945 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2947 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2948 if (r != X86EMUL_CONTINUE)
2950 if ((perm >> bit_idx) & mask)
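/*
 * Illustrative example (not in the original source): for a 1-byte access to
 * port 0x3f8, bit_idx = 0x3f8 & 7 = 0 and mask = 0x1, so the code above
 * reads the 16-bit word at TR.base + io_bitmap_ptr + 0x3f8/8 (byte 0x7f of
 * the bitmap) and denies the access if bit 0 is set; two bytes are read so
 * that multi-byte accesses straddling a byte boundary are covered.
 */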
2955 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2961 if (emulator_bad_iopl(ctxt))
2962 if (!emulator_io_port_access_allowed(ctxt, port, len))
2965 ctxt->perm_ok = true;
2970 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2973 * Intel CPUs mask the counter and pointers in quite a strange
2974 * manner when ECX is zero, due to REP-string optimizations.
2976 #ifdef CONFIG_X86_64
2977 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2980 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2983 case 0xa4: /* movsb */
2984 case 0xa5: /* movsd/w */
2985 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2987 case 0xaa: /* stosb */
2988 case 0xab: /* stosd/w */
2989 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2994 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2995 struct tss_segment_16 *tss)
2997 tss->ip = ctxt->_eip;
2998 tss->flag = ctxt->eflags;
2999 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3000 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3001 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3002 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3003 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3004 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3005 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3006 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3008 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3009 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3010 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3011 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3012 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3015 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3016 struct tss_segment_16 *tss)
3021 ctxt->_eip = tss->ip;
3022 ctxt->eflags = tss->flag | 2;
3023 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3024 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3025 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3026 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3027 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3028 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3029 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3030 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3033 * SDM says that segment selectors are loaded before segment
3034 * descriptors.
3036 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3037 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3038 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3039 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3040 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3045 * Now load segment descriptors. If a fault happens at this stage
3046 * it is handled in the context of the new task.
3048 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3049 X86_TRANSFER_TASK_SWITCH, NULL);
3050 if (ret != X86EMUL_CONTINUE)
3052 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3053 X86_TRANSFER_TASK_SWITCH, NULL);
3054 if (ret != X86EMUL_CONTINUE)
3056 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3057 X86_TRANSFER_TASK_SWITCH, NULL);
3058 if (ret != X86EMUL_CONTINUE)
3060 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3061 X86_TRANSFER_TASK_SWITCH, NULL);
3062 if (ret != X86EMUL_CONTINUE)
3064 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3065 X86_TRANSFER_TASK_SWITCH, NULL);
3066 if (ret != X86EMUL_CONTINUE)
3069 return X86EMUL_CONTINUE;
3072 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3073 u16 tss_selector, u16 old_tss_sel,
3074 ulong old_tss_base, struct desc_struct *new_desc)
3076 struct tss_segment_16 tss_seg;
3078 u32 new_tss_base = get_desc_base(new_desc);
3080 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3081 if (ret != X86EMUL_CONTINUE)
3084 save_state_to_tss16(ctxt, &tss_seg);
3086 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3087 if (ret != X86EMUL_CONTINUE)
3090 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3091 if (ret != X86EMUL_CONTINUE)
3094 if (old_tss_sel != 0xffff) {
3095 tss_seg.prev_task_link = old_tss_sel;
3097 ret = linear_write_system(ctxt, new_tss_base,
3098 &tss_seg.prev_task_link,
3099 sizeof(tss_seg.prev_task_link));
3100 if (ret != X86EMUL_CONTINUE)
3104 return load_state_from_tss16(ctxt, &tss_seg);
3107 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3108 struct tss_segment_32 *tss)
3110 /* CR3 and ldt selector are not saved intentionally */
3111 tss->eip = ctxt->_eip;
3112 tss->eflags = ctxt->eflags;
3113 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3114 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3115 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3116 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3117 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3118 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3119 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3120 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3122 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3123 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3124 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3125 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3126 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3127 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3130 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3131 struct tss_segment_32 *tss)
3136 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3137 return emulate_gp(ctxt, 0);
3138 ctxt->_eip = tss->eip;
3139 ctxt->eflags = tss->eflags | 2;
3141 /* General purpose registers */
3142 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3143 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3144 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3145 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3146 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3147 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3148 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3149 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3152 * SDM says that segment selectors are loaded before segment
3153 * descriptors. This is important because CPL checks will
3154 * use CS.RPL.
3156 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3157 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3158 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3159 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3160 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3161 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3162 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3165 * If we're switching between Protected Mode and VM86, we need to make
3166 * sure to update the mode before loading the segment descriptors so
3167 * that the selectors are interpreted correctly.
3169 if (ctxt->eflags & X86_EFLAGS_VM) {
3170 ctxt->mode = X86EMUL_MODE_VM86;
3173 ctxt->mode = X86EMUL_MODE_PROT32;
3178 * Now load segment descriptors. If a fault happens at this stage
3179 * it is handled in the context of the new task.
3181 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3182 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3183 if (ret != X86EMUL_CONTINUE)
3185 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3186 X86_TRANSFER_TASK_SWITCH, NULL);
3187 if (ret != X86EMUL_CONTINUE)
3189 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3190 X86_TRANSFER_TASK_SWITCH, NULL);
3191 if (ret != X86EMUL_CONTINUE)
3193 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3194 X86_TRANSFER_TASK_SWITCH, NULL);
3195 if (ret != X86EMUL_CONTINUE)
3197 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3198 X86_TRANSFER_TASK_SWITCH, NULL);
3199 if (ret != X86EMUL_CONTINUE)
3201 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3202 X86_TRANSFER_TASK_SWITCH, NULL);
3203 if (ret != X86EMUL_CONTINUE)
3205 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3206 X86_TRANSFER_TASK_SWITCH, NULL);
3211 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3212 u16 tss_selector, u16 old_tss_sel,
3213 ulong old_tss_base, struct desc_struct *new_desc)
3215 struct tss_segment_32 tss_seg;
3217 u32 new_tss_base = get_desc_base(new_desc);
3218 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3219 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3221 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3222 if (ret != X86EMUL_CONTINUE)
3225 save_state_to_tss32(ctxt, &tss_seg);
3227 /* Only GP registers and segment selectors are saved */
3228 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3229 ldt_sel_offset - eip_offset);
3230 if (ret != X86EMUL_CONTINUE)
3233 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3234 if (ret != X86EMUL_CONTINUE)
3237 if (old_tss_sel != 0xffff) {
3238 tss_seg.prev_task_link = old_tss_sel;
3240 ret = linear_write_system(ctxt, new_tss_base,
3241 &tss_seg.prev_task_link,
3242 sizeof(tss_seg.prev_task_link));
3243 if (ret != X86EMUL_CONTINUE)
3247 return load_state_from_tss32(ctxt, &tss_seg);
3250 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3251 u16 tss_selector, int idt_index, int reason,
3252 bool has_error_code, u32 error_code)
3254 const struct x86_emulate_ops *ops = ctxt->ops;
3255 struct desc_struct curr_tss_desc, next_tss_desc;
3257 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3258 ulong old_tss_base =
3259 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3261 ulong desc_addr, dr7;
3263 /* FIXME: old_tss_base == ~0 ? */
3265 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3266 if (ret != X86EMUL_CONTINUE)
3268 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3269 if (ret != X86EMUL_CONTINUE)
3272 /* FIXME: check that next_tss_desc is tss */
3275 * Check privileges. The three cases are task switch caused by...
3277 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3278 * 2. Exception/IRQ/iret: No check is performed
3279 * 3. jmp/call to TSS/task-gate: No check is performed since the
3280 * hardware checks it before exiting.
3282 if (reason == TASK_SWITCH_GATE) {
3283 if (idt_index != -1) {
3284 /* Software interrupts */
3285 struct desc_struct task_gate_desc;
3288 ret = read_interrupt_descriptor(ctxt, idt_index,
3290 if (ret != X86EMUL_CONTINUE)
3293 dpl = task_gate_desc.dpl;
3294 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3295 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3299 desc_limit = desc_limit_scaled(&next_tss_desc);
3300 if (!next_tss_desc.p ||
3301 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3302 desc_limit < 0x2b)) {
3303 return emulate_ts(ctxt, tss_selector & 0xfffc);
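/*
 * Illustrative note (not in the original source): type bit 3 distinguishes
 * a 32-bit TSS (which must cover at least 0x68 bytes, i.e. limit >= 0x67)
 * from a 16-bit TSS (at least 0x2c bytes, limit >= 0x2b); a descriptor
 * that is not present or too small yields #TS with the new selector as
 * the error code.
 */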
3306 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3307 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3308 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3311 if (reason == TASK_SWITCH_IRET)
3312 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3314 /* Set the back link to the previous task only if the NT bit is set
3315 in EFLAGS; note that old_tss_sel is not used after this point. */
3316 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3317 old_tss_sel = 0xffff;
3319 if (next_tss_desc.type & 8)
3320 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3321 old_tss_base, &next_tss_desc);
3323 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3324 old_tss_base, &next_tss_desc);
3325 if (ret != X86EMUL_CONTINUE)
3328 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3329 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3331 if (reason != TASK_SWITCH_IRET) {
3332 next_tss_desc.type |= (1 << 1); /* set busy flag */
3333 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3336 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3337 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3339 if (has_error_code) {
3340 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3341 ctxt->lock_prefix = 0;
3342 ctxt->src.val = (unsigned long) error_code;
3343 ret = em_push(ctxt);
3346 ops->get_dr(ctxt, 7, &dr7);
3347 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3352 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3353 u16 tss_selector, int idt_index, int reason,
3354 bool has_error_code, u32 error_code)
3358 invalidate_registers(ctxt);
3359 ctxt->_eip = ctxt->eip;
3360 ctxt->dst.type = OP_NONE;
3362 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3363 has_error_code, error_code);
3365 if (rc == X86EMUL_CONTINUE) {
3366 ctxt->eip = ctxt->_eip;
3367 writeback_registers(ctxt);
3370 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3373 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3376 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3378 register_address_increment(ctxt, reg, df * op->bytes);
3379 op->addr.mem.ea = register_address(ctxt, reg);
3382 static int em_das(struct x86_emulate_ctxt *ctxt)
3385 bool af, cf, old_cf;
3387 cf = ctxt->eflags & X86_EFLAGS_CF;
3393 af = ctxt->eflags & X86_EFLAGS_AF;
3394 if ((al & 0x0f) > 9 || af) {
3396 cf = old_cf | (al >= 250);
3401 if (old_al > 0x99 || old_cf) {
3407 /* Set PF, ZF, SF */
3408 ctxt->src.type = OP_IMM;
3410 ctxt->src.bytes = 1;
3411 fastop(ctxt, em_or);
3412 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3414 ctxt->eflags |= X86_EFLAGS_CF;
3416 ctxt->eflags |= X86_EFLAGS_AF;
3417 return X86EMUL_CONTINUE;
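/*
 * Illustrative example (not in the original source): with AL = 0x4b and
 * AF = CF = 0, the low nibble 0xb is greater than 9, so the first step
 * yields al = 0x45 with AF = 1 and no borrow (0x45 < 250); since the
 * original AL was not above 0x99 and CF was clear, 0x60 is not
 * subtracted, giving the expected packed-BCD result DAS(0x4b) = 0x45
 * with AF = 1, CF = 0.
 */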
3420 static int em_aam(struct x86_emulate_ctxt *ctxt)
3424 if (ctxt->src.val == 0)
3425 return emulate_de(ctxt);
3427 al = ctxt->dst.val & 0xff;
3428 ah = al / ctxt->src.val;
3429 al %= ctxt->src.val;
3431 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3433 /* Set PF, ZF, SF */
3434 ctxt->src.type = OP_IMM;
3436 ctxt->src.bytes = 1;
3437 fastop(ctxt, em_or);
3439 return X86EMUL_CONTINUE;
3442 static int em_aad(struct x86_emulate_ctxt *ctxt)
3444 u8 al = ctxt->dst.val & 0xff;
3445 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3447 al = (al + (ah * ctxt->src.val)) & 0xff;
3449 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3451 /* Set PF, ZF, SF */
3452 ctxt->src.type = OP_IMM;
3454 ctxt->src.bytes = 1;
3455 fastop(ctxt, em_or);
3457 return X86EMUL_CONTINUE;
3460 static int em_call(struct x86_emulate_ctxt *ctxt)
3463 long rel = ctxt->src.val;
3465 ctxt->src.val = (unsigned long)ctxt->_eip;
3466 rc = jmp_rel(ctxt, rel);
3467 if (rc != X86EMUL_CONTINUE)
3469 return em_push(ctxt);
3472 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3477 struct desc_struct old_desc, new_desc;
3478 const struct x86_emulate_ops *ops = ctxt->ops;
3479 int cpl = ctxt->ops->cpl(ctxt);
3480 enum x86emul_mode prev_mode = ctxt->mode;
3482 old_eip = ctxt->_eip;
3483 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3485 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3486 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3487 X86_TRANSFER_CALL_JMP, &new_desc);
3488 if (rc != X86EMUL_CONTINUE)
3491 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3492 if (rc != X86EMUL_CONTINUE)
3495 ctxt->src.val = old_cs;
3497 if (rc != X86EMUL_CONTINUE)
3500 ctxt->src.val = old_eip;
3502 /* If we failed, we tainted the memory, but at the very least we
3503 should restore cs */
3504 if (rc != X86EMUL_CONTINUE) {
3505 pr_warn_once("faulting far call emulation tainted memory\n");
3510 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3511 ctxt->mode = prev_mode;
3516 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3521 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3522 if (rc != X86EMUL_CONTINUE)
3524 rc = assign_eip_near(ctxt, eip);
3525 if (rc != X86EMUL_CONTINUE)
3527 rsp_increment(ctxt, ctxt->src.val);
3528 return X86EMUL_CONTINUE;
3531 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3533 /* Write back the register source. */
3534 ctxt->src.val = ctxt->dst.val;
3535 write_register_operand(&ctxt->src);
3537 /* Write back the memory destination with implicit LOCK prefix. */
3538 ctxt->dst.val = ctxt->src.orig_val;
3539 ctxt->lock_prefix = 1;
3540 return X86EMUL_CONTINUE;
3543 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3545 ctxt->dst.val = ctxt->src2.val;
3546 return fastop(ctxt, em_imul);
3549 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3551 ctxt->dst.type = OP_REG;
3552 ctxt->dst.bytes = ctxt->src.bytes;
3553 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3554 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3556 return X86EMUL_CONTINUE;
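/*
 * Illustrative note (not in the original source): the dst.val expression
 * above is a branchless CWD/CDQ/CQO, equivalent to
 *
 *	dst.val = (src sign bit set) ? ~0UL : 0UL;
 *
 * Shifting the source right by (bytes * 8 - 1) isolates the sign bit s;
 * (s - 1) is ~0UL for s == 0 and 0 for s == 1, so the final complement
 * stores 0 in rDX for a non-negative source and all-ones for a negative
 * one.
 */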
3559 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3563 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3564 return emulate_gp(ctxt, 0);
3565 ctxt->dst.val = tsc_aux;
3566 return X86EMUL_CONTINUE;
3569 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3573 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3574 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3575 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3576 return X86EMUL_CONTINUE;
3579 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3583 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3584 return emulate_gp(ctxt, 0);
3585 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3586 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3587 return X86EMUL_CONTINUE;
3590 static int em_mov(struct x86_emulate_ctxt *ctxt)
3592 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3593 return X86EMUL_CONTINUE;
3596 #define FFL(x) bit(X86_FEATURE_##x)
3598 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3600 u32 ebx, ecx, edx, eax = 1;
3604 * Check MOVBE is set in the guest-visible CPUID leaf.
3606 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3607 if (!(ecx & FFL(MOVBE)))
3608 return emulate_ud(ctxt);
3610 switch (ctxt->op_bytes) {
3613 * From the MOVBE definition: "...When the operand size is 16 bits,
3614 * the upper word of the destination register remains unchanged
3615 * ..."
3617 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3618 * rules, so we have to do the operation almost by hand.
3620 tmp = (u16)ctxt->src.val;
3621 ctxt->dst.val &= ~0xffffUL;
3622 ctxt->dst.val |= (unsigned long)swab16(tmp);
3625 ctxt->dst.val = swab32((u32)ctxt->src.val);
3628 ctxt->dst.val = swab64(ctxt->src.val);
3633 return X86EMUL_CONTINUE;
3636 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3638 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3639 return emulate_gp(ctxt, 0);
3641 /* Disable writeback. */
3642 ctxt->dst.type = OP_NONE;
3643 return X86EMUL_CONTINUE;
3646 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3650 if (ctxt->mode == X86EMUL_MODE_PROT64)
3651 val = ctxt->src.val & ~0ULL;
3653 val = ctxt->src.val & ~0U;
3655 /* #UD condition is already handled. */
3656 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3657 return emulate_gp(ctxt, 0);
3659 /* Disable writeback. */
3660 ctxt->dst.type = OP_NONE;
3661 return X86EMUL_CONTINUE;
3664 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3668 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3669 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3670 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3671 return emulate_gp(ctxt, 0);
3673 return X86EMUL_CONTINUE;
3676 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3680 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3681 return emulate_gp(ctxt, 0);
3683 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3684 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3685 return X86EMUL_CONTINUE;
3688 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3690 if (segment > VCPU_SREG_GS &&
3691 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3692 ctxt->ops->cpl(ctxt) > 0)
3693 return emulate_gp(ctxt, 0);
3695 ctxt->dst.val = get_segment_selector(ctxt, segment);
3696 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3697 ctxt->dst.bytes = 2;
3698 return X86EMUL_CONTINUE;
3701 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3703 if (ctxt->modrm_reg > VCPU_SREG_GS)
3704 return emulate_ud(ctxt);
3706 return em_store_sreg(ctxt, ctxt->modrm_reg);
3709 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3711 u16 sel = ctxt->src.val;
3713 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3714 return emulate_ud(ctxt);
3716 if (ctxt->modrm_reg == VCPU_SREG_SS)
3717 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3719 /* Disable writeback. */
3720 ctxt->dst.type = OP_NONE;
3721 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3724 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3726 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3729 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3731 u16 sel = ctxt->src.val;
3733 /* Disable writeback. */
3734 ctxt->dst.type = OP_NONE;
3735 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3738 static int em_str(struct x86_emulate_ctxt *ctxt)
3740 return em_store_sreg(ctxt, VCPU_SREG_TR);
3743 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3745 u16 sel = ctxt->src.val;
3747 /* Disable writeback. */
3748 ctxt->dst.type = OP_NONE;
3749 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3752 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3757 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3758 if (rc == X86EMUL_CONTINUE)
3759 ctxt->ops->invlpg(ctxt, linear);
3760 /* Disable writeback. */
3761 ctxt->dst.type = OP_NONE;
3762 return X86EMUL_CONTINUE;
3765 static int em_clts(struct x86_emulate_ctxt *ctxt)
3769 cr0 = ctxt->ops->get_cr(ctxt, 0);
3771 ctxt->ops->set_cr(ctxt, 0, cr0);
3772 return X86EMUL_CONTINUE;
3775 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3777 int rc = ctxt->ops->fix_hypercall(ctxt);
3779 if (rc != X86EMUL_CONTINUE)
3782 /* Let the processor re-execute the fixed hypercall */
3783 ctxt->_eip = ctxt->eip;
3784 /* Disable writeback. */
3785 ctxt->dst.type = OP_NONE;
3786 return X86EMUL_CONTINUE;
3789 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3790 void (*get)(struct x86_emulate_ctxt *ctxt,
3791 struct desc_ptr *ptr))
3793 struct desc_ptr desc_ptr;
3795 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3796 ctxt->ops->cpl(ctxt) > 0)
3797 return emulate_gp(ctxt, 0);
3799 if (ctxt->mode == X86EMUL_MODE_PROT64)
3801 get(ctxt, &desc_ptr);
3802 if (ctxt->op_bytes == 2) {
3804 desc_ptr.address &= 0x00ffffff;
3806 /* Disable writeback. */
3807 ctxt->dst.type = OP_NONE;
3808 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3809 &desc_ptr, 2 + ctxt->op_bytes);
3812 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3814 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3817 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3819 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3822 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3824 struct desc_ptr desc_ptr;
3827 if (ctxt->mode == X86EMUL_MODE_PROT64)
3829 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3830 &desc_ptr.size, &desc_ptr.address,
3832 if (rc != X86EMUL_CONTINUE)
3834 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3835 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3836 return emulate_gp(ctxt, 0);
3838 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3840 ctxt->ops->set_idt(ctxt, &desc_ptr);
3841 /* Disable writeback. */
3842 ctxt->dst.type = OP_NONE;
3843 return X86EMUL_CONTINUE;
3846 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3848 return em_lgdt_lidt(ctxt, true);
3851 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3853 return em_lgdt_lidt(ctxt, false);
3856 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3858 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3859 ctxt->ops->cpl(ctxt) > 0)
3860 return emulate_gp(ctxt, 0);
3862 if (ctxt->dst.type == OP_MEM)
3863 ctxt->dst.bytes = 2;
3864 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3865 return X86EMUL_CONTINUE;
3868 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3870 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3871 | (ctxt->src.val & 0x0f));
3872 ctxt->dst.type = OP_NONE;
3873 return X86EMUL_CONTINUE;
3876 static int em_loop(struct x86_emulate_ctxt *ctxt)
3878 int rc = X86EMUL_CONTINUE;
3880 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3881 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3882 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3883 rc = jmp_rel(ctxt, ctxt->src.val);
3888 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3890 int rc = X86EMUL_CONTINUE;
3892 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3893 rc = jmp_rel(ctxt, ctxt->src.val);
3898 static int em_in(struct x86_emulate_ctxt *ctxt)
3900 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3902 return X86EMUL_IO_NEEDED;
3904 return X86EMUL_CONTINUE;
3907 static int em_out(struct x86_emulate_ctxt *ctxt)
3909 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3911 /* Disable writeback. */
3912 ctxt->dst.type = OP_NONE;
3913 return X86EMUL_CONTINUE;
3916 static int em_cli(struct x86_emulate_ctxt *ctxt)
3918 if (emulator_bad_iopl(ctxt))
3919 return emulate_gp(ctxt, 0);
3921 ctxt->eflags &= ~X86_EFLAGS_IF;
3922 return X86EMUL_CONTINUE;
3925 static int em_sti(struct x86_emulate_ctxt *ctxt)
3927 if (emulator_bad_iopl(ctxt))
3928 return emulate_gp(ctxt, 0);
3930 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3931 ctxt->eflags |= X86_EFLAGS_IF;
3932 return X86EMUL_CONTINUE;
3935 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3937 u32 eax, ebx, ecx, edx;
3940 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3941 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3942 ctxt->ops->cpl(ctxt)) {
3943 return emulate_gp(ctxt, 0);
3946 eax = reg_read(ctxt, VCPU_REGS_RAX);
3947 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3948 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3949 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3950 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3951 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3952 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3953 return X86EMUL_CONTINUE;
3956 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3960 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3962 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3964 ctxt->eflags &= ~0xffUL;
3965 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3966 return X86EMUL_CONTINUE;
3969 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3971 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3972 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3973 return X86EMUL_CONTINUE;
3976 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3978 switch (ctxt->op_bytes) {
3979 #ifdef CONFIG_X86_64
3981 asm("bswap %0" : "+r"(ctxt->dst.val));
3985 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3988 return X86EMUL_CONTINUE;
3991 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3993 /* emulating clflush regardless of cpuid */
3994 return X86EMUL_CONTINUE;
3997 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3999 ctxt->dst.val = (s32) ctxt->src.val;
4000 return X86EMUL_CONTINUE;
4003 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4005 u32 eax = 1, ebx, ecx = 0, edx;
4007 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4008 if (!(edx & FFL(FXSR)))
4009 return emulate_ud(ctxt);
4011 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4012 return emulate_nm(ctxt);
4015 * Don't emulate a case that should never be hit, instead of working
4016 * around a lack of fxsave64/fxrstor64 on old compilers.
4018 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4019 return X86EMUL_UNHANDLEABLE;
4021 return X86EMUL_CONTINUE;
4025 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4026 * and restore MXCSR.
4028 static size_t __fxstate_size(int nregs)
4030 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
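/*
 * Illustrative note (not in the original source): assuming the standard
 * FXSAVE image layout, offsetof(struct fxregs_state, xmm_space[0]) is 160,
 * so __fxstate_size(0) == 160 (header plus x87 state, no XMM),
 * __fxstate_size(8) == 288 (XMM0-7) and __fxstate_size(16) == 416
 * (XMM0-15, the 64-bit case).
 */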
4033 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4036 if (ctxt->mode == X86EMUL_MODE_PROT64)
4037 return __fxstate_size(16);
4039 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4040 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4044 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4045 * 1) 16 bit mode
4046 * 2) 32 bit mode
4047 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
4048 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4049 * FPU state.
4050 * 3) 64-bit mode with REX.W prefix
4051 * - like (2), but XMM 8-15 are being saved and restored
4052 * 4) 64-bit mode without REX.W prefix
4053 * - like (3), but FIP and FDP are 64 bit
4055 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4056 * desired result. (4) is not emulated.
4058 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4059 * and FPU DS) should match.
4061 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4063 struct fxregs_state fx_state;
4066 rc = check_fxsr(ctxt);
4067 if (rc != X86EMUL_CONTINUE)
4070 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4072 if (rc != X86EMUL_CONTINUE)
4075 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4076 fxstate_size(ctxt));
4080 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4081 * in the host registers (via FXSAVE) instead, so they won't be modified.
4082 * (preemption has to stay disabled until FXRSTOR).
4084 * Use noinline to keep the stack for other functions called by callers small.
4086 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4087 const size_t used_size)
4089 struct fxregs_state fx_tmp;
4092 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4093 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4094 __fxstate_size(16) - used_size);
4099 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4101 struct fxregs_state fx_state;
4105 rc = check_fxsr(ctxt);
4106 if (rc != X86EMUL_CONTINUE)
4109 size = fxstate_size(ctxt);
4110 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4111 if (rc != X86EMUL_CONTINUE)
4114 if (size < __fxstate_size(16)) {
4115 rc = fxregs_fixup(&fx_state, size);
4116 if (rc != X86EMUL_CONTINUE)
4120 if (fx_state.mxcsr >> 16) {
4121 rc = emulate_gp(ctxt, 0);
4125 if (rc == X86EMUL_CONTINUE)
4126 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
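/*
 * Illustrative note (not in the original source): hardware raises #GP when
 * FXRSTOR would set reserved MXCSR bits. The mxcsr >> 16 test above
 * approximates that by rejecting the architecturally always-reserved upper
 * half of MXCSR, without consulting the CPU's MXCSR_MASK.
 */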
4132 static bool valid_cr(int nr)
4144 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4146 if (!valid_cr(ctxt->modrm_reg))
4147 return emulate_ud(ctxt);
4149 return X86EMUL_CONTINUE;
4152 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4154 u64 new_val = ctxt->src.val64;
4155 int cr = ctxt->modrm_reg;
4158 static u64 cr_reserved_bits[] = {
4159 0xffffffff00000000ULL,
4160 0, 0, 0, /* CR3 checked later */
4167 return emulate_ud(ctxt);
4169 if (new_val & cr_reserved_bits[cr])
4170 return emulate_gp(ctxt, 0);
4175 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4176 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4177 return emulate_gp(ctxt, 0);
4179 cr4 = ctxt->ops->get_cr(ctxt, 4);
4180 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4182 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4183 !(cr4 & X86_CR4_PAE))
4184 return emulate_gp(ctxt, 0);
4191 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4192 if (efer & EFER_LMA) {
4194 u32 eax, ebx, ecx, edx;
4198 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4200 maxphyaddr = eax & 0xff;
4203 rsvd = rsvd_bits(maxphyaddr, 63);
4204 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4205 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4209 return emulate_gp(ctxt, 0);
4214 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4216 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4217 return emulate_gp(ctxt, 0);
4223 return X86EMUL_CONTINUE;
4226 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4230 ctxt->ops->get_dr(ctxt, 7, &dr7);
4232 /* Check if DR7.GD (general detect) is set */
4233 return dr7 & (1 << 13);
4236 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4238 int dr = ctxt->modrm_reg;
4242 return emulate_ud(ctxt);
4244 cr4 = ctxt->ops->get_cr(ctxt, 4);
4245 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4246 return emulate_ud(ctxt);
4248 if (check_dr7_gd(ctxt)) {
4251 ctxt->ops->get_dr(ctxt, 6, &dr6);
4253 dr6 |= DR6_BD | DR6_RTM;
4254 ctxt->ops->set_dr(ctxt, 6, dr6);
4255 return emulate_db(ctxt);
4258 return X86EMUL_CONTINUE;
4261 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4263 u64 new_val = ctxt->src.val64;
4264 int dr = ctxt->modrm_reg;
4266 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4267 return emulate_gp(ctxt, 0);
4269 return check_dr_read(ctxt);
4272 static int check_svme(struct x86_emulate_ctxt *ctxt)
4276 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4278 if (!(efer & EFER_SVME))
4279 return emulate_ud(ctxt);
4281 return X86EMUL_CONTINUE;
4284 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4286 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4288 /* Valid physical address? */
4289 if (rax & 0xffff000000000000ULL)
4290 return emulate_gp(ctxt, 0);
4292 return check_svme(ctxt);
4295 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4297 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4299 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4300 return emulate_ud(ctxt);
4302 return X86EMUL_CONTINUE;
4305 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4307 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4308 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4311 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4312 * in Ring3 when CR4.PCE=0.
4314 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4315 return X86EMUL_CONTINUE;
4317 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4318 ctxt->ops->check_pmc(ctxt, rcx))
4319 return emulate_gp(ctxt, 0);
4321 return X86EMUL_CONTINUE;
4324 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4326 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4327 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4328 return emulate_gp(ctxt, 0);
4330 return X86EMUL_CONTINUE;
4333 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4335 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4336 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4337 return emulate_gp(ctxt, 0);
4339 return X86EMUL_CONTINUE;
4342 #define D(_y) { .flags = (_y) }
4343 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4344 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4345 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4346 #define N D(NotImpl)
4347 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4348 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4349 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4350 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4351 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4352 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4353 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4354 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4355 #define II(_f, _e, _i) \
4356 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4357 #define IIP(_f, _e, _i, _p) \
4358 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4359 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4360 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4362 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4363 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4364 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4365 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4366 #define I2bvIP(_f, _e, _i, _p) \
4367 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4369 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4370 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4371 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
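/*
 * Illustrative note (not in the original source): F6ALU emits the six
 * classic ALU encodings in opcode order. F6ALU(Lock, em_add), for
 * instance, expands to the entries for 0x00-0x05: ADD r/m8,r8;
 * ADD r/m,r; ADD r8,r/m8; ADD r,r/m; ADD AL,imm8; ADD rAX,imm. Lock is
 * masked off for the register-destination and accumulator forms because
 * LOCK is only legal with a memory destination.
 */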
4373 static const struct opcode group7_rm0[] = {
4375 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4379 static const struct opcode group7_rm1[] = {
4380 DI(SrcNone | Priv, monitor),
4381 DI(SrcNone | Priv, mwait),
4385 static const struct opcode group7_rm3[] = {
4386 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4387 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4388 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4389 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4390 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4391 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4392 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4393 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4396 static const struct opcode group7_rm7[] = {
4398 DIP(SrcNone, rdtscp, check_rdtsc),
4402 static const struct opcode group1[] = {
4404 F(Lock | PageTable, em_or),
4407 F(Lock | PageTable, em_and),
4413 static const struct opcode group1A[] = {
4414 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4417 static const struct opcode group2[] = {
4418 F(DstMem | ModRM, em_rol),
4419 F(DstMem | ModRM, em_ror),
4420 F(DstMem | ModRM, em_rcl),
4421 F(DstMem | ModRM, em_rcr),
4422 F(DstMem | ModRM, em_shl),
4423 F(DstMem | ModRM, em_shr),
4424 F(DstMem | ModRM, em_shl), /* /6 (SAL) is an alias of SHL */
4425 F(DstMem | ModRM, em_sar),
4428 static const struct opcode group3[] = {
4429 F(DstMem | SrcImm | NoWrite, em_test),
4430 F(DstMem | SrcImm | NoWrite, em_test), /* /1: alias of TEST (/0) */
4431 F(DstMem | SrcNone | Lock, em_not),
4432 F(DstMem | SrcNone | Lock, em_neg),
4433 F(DstXacc | Src2Mem, em_mul_ex),
4434 F(DstXacc | Src2Mem, em_imul_ex),
4435 F(DstXacc | Src2Mem, em_div_ex),
4436 F(DstXacc | Src2Mem, em_idiv_ex),
4439 static const struct opcode group4[] = {
4440 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4441 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4445 static const struct opcode group5[] = {
4446 F(DstMem | SrcNone | Lock, em_inc),
4447 F(DstMem | SrcNone | Lock, em_dec),
4448 I(SrcMem | NearBranch, em_call_near_abs),
4449 I(SrcMemFAddr | ImplicitOps, em_call_far),
4450 I(SrcMem | NearBranch, em_jmp_abs),
4451 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4452 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4455 static const struct opcode group6[] = {
4456 II(Prot | DstMem, em_sldt, sldt),
4457 II(Prot | DstMem, em_str, str),
4458 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4459 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4463 static const struct group_dual group7 = { {
4464 II(Mov | DstMem, em_sgdt, sgdt),
4465 II(Mov | DstMem, em_sidt, sidt),
4466 II(SrcMem | Priv, em_lgdt, lgdt),
4467 II(SrcMem | Priv, em_lidt, lidt),
4468 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4469 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4470 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4474 N, EXT(0, group7_rm3),
4475 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4476 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4480 static const struct opcode group8[] = {
4482 F(DstMem | SrcImmByte | NoWrite, em_bt),
4483 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4484 F(DstMem | SrcImmByte | Lock, em_btr),
4485 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4489 * The "memory" destination is actually always a register, since we come
4490 * from the register case of group9.
4492 static const struct gprefix pfx_0f_c7_7 = {
4493 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4497 static const struct group_dual group9 = { {
4498 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4500 N, N, N, N, N, N, N,
4501 GP(0, &pfx_0f_c7_7),
4504 static const struct opcode group11[] = {
4505 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4509 static const struct gprefix pfx_0f_ae_7 = {
4510 I(SrcMem | ByteOp, em_clflush), N, N, N,
4513 static const struct group_dual group15 = { {
4514 I(ModRM | Aligned16, em_fxsave),
4515 I(ModRM | Aligned16, em_fxrstor),
4516 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4518 N, N, N, N, N, N, N, N,
4521 static const struct gprefix pfx_0f_6f_0f_7f = {
4522 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4525 static const struct instr_dual instr_dual_0f_2b = {
4529 static const struct gprefix pfx_0f_2b = {
4530 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4533 static const struct gprefix pfx_0f_10_0f_11 = {
4534 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4537 static const struct gprefix pfx_0f_28_0f_29 = {
4538 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4541 static const struct gprefix pfx_0f_e7 = {
4542 N, I(Sse, em_mov), N, N,
4545 static const struct escape escape_d9 = { {
4546 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4549 N, N, N, N, N, N, N, N,
4551 N, N, N, N, N, N, N, N,
4553 N, N, N, N, N, N, N, N,
4555 N, N, N, N, N, N, N, N,
4557 N, N, N, N, N, N, N, N,
4559 N, N, N, N, N, N, N, N,
4561 N, N, N, N, N, N, N, N,
4563 N, N, N, N, N, N, N, N,
4566 static const struct escape escape_db = { {
4567 N, N, N, N, N, N, N, N,
4570 N, N, N, N, N, N, N, N,
4572 N, N, N, N, N, N, N, N,
4574 N, N, N, N, N, N, N, N,
4576 N, N, N, N, N, N, N, N,
4578 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4580 N, N, N, N, N, N, N, N,
4582 N, N, N, N, N, N, N, N,
4584 N, N, N, N, N, N, N, N,
4587 static const struct escape escape_dd = { {
4588 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4591 N, N, N, N, N, N, N, N,
4593 N, N, N, N, N, N, N, N,
4595 N, N, N, N, N, N, N, N,
4597 N, N, N, N, N, N, N, N,
4599 N, N, N, N, N, N, N, N,
4601 N, N, N, N, N, N, N, N,
4603 N, N, N, N, N, N, N, N,
4605 N, N, N, N, N, N, N, N,
4608 static const struct instr_dual instr_dual_0f_c3 = {
4609 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4612 static const struct mode_dual mode_dual_63 = {
4613 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

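/*
 * Illustrative decode walk (not a special case in the code): for the
 * bytes 10 18, i.e. "adc %bl, (%eax)", opcode_table[0x10] is the
 * first entry generated by F6ALU(Lock, em_adc) - a byte-sized,
 * lockable, ModRM-decoded fastop - so the decoder goes on to fetch
 * the ModRM byte 0x18 and resolves the memory destination from it.
 */
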
static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
	N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

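/*
 * Two-byte (0F-prefixed) opcodes index this table by the second
 * opcode byte.  E.g. 0F A5 (the %cl form of shld) lands on the
 * F(DstMem | SrcReg | Src2CL | ModRM, em_shld) entry above, so the
 * shift count is decoded as a third (Src2) operand taken from %cl.
 */
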
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * The instructions below are selected by the prefix, indexed by the
 * third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

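/*
 * Example path through this map: movbe (0F 38 F0/F1) reaches the GP
 * entries above, the gprefix selects by SIMD prefix, and the
 * resulting instr_dual keeps only the memory form (mod != 3); the
 * register-to-register form stays N, so only memory movbe is
 * emulated.
 */
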
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1: op->val = insn_fetch(s8, ctxt); break;
	case 2: op->val = insn_fetch(s16, ctxt); break;
	case 4: op->val = insn_fetch(s32, ctxt); break;
	case 8: op->val = insn_fetch(s64, ctxt); break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1: op->val &= 0xff; break;
		case 2: op->val &= 0xffff; break;
		case 4: op->val &= 0xffffffff; break;
		}
	}
done:
	return rc;
}

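/*
 * Illustration: a byte immediate 0xff is fetched as (s8)-1, so
 * op->val sign-extends to all ones; when the operand is defined as
 * zero-extended (sign_extension == false), the masking above
 * truncates it back to 0xff.  Immediates wider than four bytes occur
 * only via OpImm64 (e.g. mov reg, imm64).
 */
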
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES: op->type = OP_IMM; op->val = VCPU_SREG_ES; break;
	case OpCS: op->type = OP_IMM; op->val = VCPU_SREG_CS; break;
	case OpSS: op->type = OP_IMM; op->val = VCPU_SREG_SS; break;
	case OpDS: op->type = OP_IMM; op->val = VCPU_SREG_DS; break;
	case OpFS: op->type = OP_IMM; op->val = VCPU_SREG_FS; break;
	case OpGS: op->type = OP_IMM; op->val = VCPU_SREG_GS; break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

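/*
 * All the OpMem* cases funnel through mem_common and share
 * ctxt->memop, which was decoded once from the ModRM/SIB bytes (or a
 * moffs for MemAbs); ctxt->memopp records which operand received it
 * so that the RIP-relative fixup at the end of decode can patch the
 * effective address in place.
 */
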
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

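/*
 * Decode thus proceeds in fixed stages: legacy and REX prefixes,
 * opcode byte(s) and table lookup, group/prefix/escape/dual
 * refinement of ctxt->d, operand-size fixups, ModRM/SIB or absolute
 * address decode, and finally the three decode_operand() calls driven
 * by the Src/Src2/Dst fields packed into ctxt->d.
 */
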
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies to REPE and
	 * REPNE: if the repeat prefix is REPE/REPZ or REPNE/REPNZ,
	 * test the corresponding termination condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = asm_safe("fwait");

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

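/*
 * fastop() dispatches into a block of generated asm stubs: the
 * variants of one operation are laid out FASTOP_SIZE bytes apart in
 * operand-size order, so e.g. a 4-byte operation runs at
 * fop + __ffs(4) * FASTOP_SIZE.  Guest flags are installed with
 * push/popf around the (retpoline-safe) indirect call and read back
 * the same way; a stub that faults on a division reports it by
 * returning NULL in the fop register, which is turned into #DE above.
 */
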
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

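/*
 * The single memset depends on the layout of struct x86_emulate_ctxt:
 * the per-instruction decode fields are grouped between rip_relative
 * and modrm, so clearing that byte range resets the whole decode
 * cache at once; reordering those fields would break this.
 */
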
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore the dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024th iteration.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc rel */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}

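/*
 * Both bail-outs above guard the cached-GPA fast path: a REP string
 * iteration and a TwoMemOp instruction such as movs touch more than
 * one guest address per emulation, so a single cached translation
 * cannot safely be reused for them.
 */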