1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <asm/kvm_emulate.h>
26 #include <linux/stringify.h>
27 #include <asm/debugreg.h>
28 #include <asm/nospec-branch.h>
38 #define OpImplicit 1ull /* No generic decode */
39 #define OpReg 2ull /* Register */
40 #define OpMem 3ull /* Memory */
41 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
42 #define OpDI 5ull /* ES:DI/EDI/RDI */
43 #define OpMem64 6ull /* Memory, 64-bit */
44 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
45 #define OpDX 8ull /* DX register */
46 #define OpCL 9ull /* CL register (for shifts) */
47 #define OpImmByte 10ull /* 8-bit sign extended immediate */
48 #define OpOne 11ull /* Implied 1 */
49 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
50 #define OpMem16 13ull /* Memory operand (16-bit). */
51 #define OpMem32 14ull /* Memory operand (32-bit). */
52 #define OpImmU 15ull /* Immediate operand, zero extended */
53 #define OpSI 16ull /* SI/ESI/RSI */
54 #define OpImmFAddr 17ull /* Immediate far address */
55 #define OpMemFAddr 18ull /* Far address in memory */
56 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
57 #define OpES 20ull /* ES */
58 #define OpCS 21ull /* CS */
59 #define OpSS 22ull /* SS */
60 #define OpDS 23ull /* DS */
61 #define OpFS 24ull /* FS */
62 #define OpGS 25ull /* GS */
63 #define OpMem8 26ull /* 8-bit zero extended memory operand */
64 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
65 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
66 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
67 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
69 #define OpBits 5 /* Width of operand field */
70 #define OpMask ((1ull << OpBits) - 1)
73 * Opcode effective-address decode tables.
74 * Note that we only emulate instructions that have at least one memory
75 * operand (excluding implicit stack references). We assume that stack
76 * references and instruction fetches will never occur in special memory
77 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
81 /* Operand sizes: 8-bit operands or specified/overridden size. */
82 #define ByteOp (1<<0) /* 8-bit operands. */
83 /* Destination operand type. */
85 #define ImplicitOps (OpImplicit << DstShift)
86 #define DstReg (OpReg << DstShift)
87 #define DstMem (OpMem << DstShift)
88 #define DstAcc (OpAcc << DstShift)
89 #define DstDI (OpDI << DstShift)
90 #define DstMem64 (OpMem64 << DstShift)
91 #define DstMem16 (OpMem16 << DstShift)
92 #define DstImmUByte (OpImmUByte << DstShift)
93 #define DstDX (OpDX << DstShift)
94 #define DstAccLo (OpAccLo << DstShift)
95 #define DstMask (OpMask << DstShift)
96 /* Source operand type. */
98 #define SrcNone (OpNone << SrcShift)
99 #define SrcReg (OpReg << SrcShift)
100 #define SrcMem (OpMem << SrcShift)
101 #define SrcMem16 (OpMem16 << SrcShift)
102 #define SrcMem32 (OpMem32 << SrcShift)
103 #define SrcImm (OpImm << SrcShift)
104 #define SrcImmByte (OpImmByte << SrcShift)
105 #define SrcOne (OpOne << SrcShift)
106 #define SrcImmUByte (OpImmUByte << SrcShift)
107 #define SrcImmU (OpImmU << SrcShift)
108 #define SrcSI (OpSI << SrcShift)
109 #define SrcXLat (OpXLat << SrcShift)
110 #define SrcImmFAddr (OpImmFAddr << SrcShift)
111 #define SrcMemFAddr (OpMemFAddr << SrcShift)
112 #define SrcAcc (OpAcc << SrcShift)
113 #define SrcImmU16 (OpImmU16 << SrcShift)
114 #define SrcImm64 (OpImm64 << SrcShift)
115 #define SrcDX (OpDX << SrcShift)
116 #define SrcMem8 (OpMem8 << SrcShift)
117 #define SrcAccHi (OpAccHi << SrcShift)
118 #define SrcMask (OpMask << SrcShift)
119 #define BitOp (1<<11)
120 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
121 #define String (1<<13) /* String instruction (rep capable) */
122 #define Stack (1<<14) /* Stack instruction (push/pop) */
123 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
124 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
125 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
126 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
127 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
128 #define Escape (5<<15) /* Escape to coprocessor instruction */
129 #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
130 #define ModeDual (7<<15) /* Different instruction for 32/64 bit */
131 #define Sse (1<<18) /* SSE Vector instruction */
132 /* Generic ModRM decode. */
133 #define ModRM (1<<19)
134 /* Destination is only written; never read. */
137 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
138 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
139 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
140 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
141 #define Undefined (1<<25) /* No Such Instruction */
142 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
143 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
145 #define PageTable (1 << 29) /* instruction used to write page table */
146 #define NotImpl (1 << 30) /* instruction is not implemented */
147 /* Source 2 operand type */
148 #define Src2Shift (31)
149 #define Src2None (OpNone << Src2Shift)
150 #define Src2Mem (OpMem << Src2Shift)
151 #define Src2CL (OpCL << Src2Shift)
152 #define Src2ImmByte (OpImmByte << Src2Shift)
153 #define Src2One (OpOne << Src2Shift)
154 #define Src2Imm (OpImm << Src2Shift)
155 #define Src2ES (OpES << Src2Shift)
156 #define Src2CS (OpCS << Src2Shift)
157 #define Src2SS (OpSS << Src2Shift)
158 #define Src2DS (OpDS << Src2Shift)
159 #define Src2FS (OpFS << Src2Shift)
160 #define Src2GS (OpGS << Src2Shift)
161 #define Src2Mask (OpMask << Src2Shift)
162 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
163 #define AlignMask ((u64)7 << 41)
164 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
165 #define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
166 #define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
167 #define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
168 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
169 #define NoWrite ((u64)1 << 45) /* No writeback */
170 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
171 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
172 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
173 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
174 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
175 #define NearBranch ((u64)1 << 52) /* Near branches */
176 #define No16 ((u64)1 << 53) /* No 16 bit operand */
177 #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
178 #define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operand */
180 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
182 #define X2(x...) x, x
183 #define X3(x...) X2(x), x
184 #define X4(x...) X2(x), X2(x)
185 #define X5(x...) X4(x), x
186 #define X6(x...) X4(x), X2(x)
187 #define X7(x...) X4(x), X3(x)
188 #define X8(x...) X4(x), X4(x)
189 #define X16(x...) X8(x), X8(x)
191 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
192 #define FASTOP_SIZE 8
195 * fastop functions have a special calling convention:
200 * flags: rflags (in/out)
201 * ex: rsi (in:fastop pointer, out:zero if exception)
203 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
204 * different operand sizes can be reached by calculation, rather than a jump
205 * table (which would be bigger than the code).
207 * fastop functions are declared as taking a never-defined fastop parameter,
208 * so they can't be called from C directly.
217 int (*execute)(struct x86_emulate_ctxt *ctxt);
218 const struct opcode *group;
219 const struct group_dual *gdual;
220 const struct gprefix *gprefix;
221 const struct escape *esc;
222 const struct instr_dual *idual;
223 const struct mode_dual *mdual;
224 void (*fastop)(struct fastop *fake);
226 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
230 struct opcode mod012[8];
231 struct opcode mod3[8];
235 struct opcode pfx_no;
236 struct opcode pfx_66;
237 struct opcode pfx_f2;
238 struct opcode pfx_f3;
243 struct opcode high[64];
247 struct opcode mod012;
252 struct opcode mode32;
253 struct opcode mode64;
256 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
258 enum x86_transfer_type {
260 X86_TRANSFER_CALL_JMP,
262 X86_TRANSFER_TASK_SWITCH,
265 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
267 if (!(ctxt->regs_valid & (1 << nr))) {
268 ctxt->regs_valid |= 1 << nr;
269 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
271 return ctxt->_regs[nr];
274 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
276 ctxt->regs_valid |= 1 << nr;
277 ctxt->regs_dirty |= 1 << nr;
278 return &ctxt->_regs[nr];
281 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
284 return reg_write(ctxt, nr);
287 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
291 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
292 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
295 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
297 ctxt->regs_dirty = 0;
298 ctxt->regs_valid = 0;
302 * These EFLAGS bits are restored from saved value during emulation, and
303 * any changes are written back to the saved value after emulation.
305 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
306 X86_EFLAGS_PF|X86_EFLAGS_CF)
314 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
316 #define FOP_FUNC(name) \
317 ".align " __stringify(FASTOP_SIZE) " \n\t" \
318 ".type " name ", @function \n\t" \
321 #define FOP_RET "ret \n\t"
323 #define FOP_START(op) \
324 extern void em_##op(struct fastop *fake); \
325 asm(".pushsection .text, \"ax\" \n\t" \
326 ".global em_" #op " \n\t" \
333 FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
336 #define FOP1E(op, dst) \
337 FOP_FUNC(#op "_" #dst) \
338 "10: " #op " %" #dst " \n\t" FOP_RET
340 #define FOP1EEX(op, dst) \
341 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
343 #define FASTOP1(op) \
348 ON64(FOP1E(op##q, rax)) \
351 /* 1-operand, using src2 (for MUL/DIV r/m) */
352 #define FASTOP1SRC2(op, name) \
357 ON64(FOP1E(op, rcx)) \
360 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
361 #define FASTOP1SRC2EX(op, name) \
366 ON64(FOP1EEX(op, rcx)) \
369 #define FOP2E(op, dst, src) \
370 FOP_FUNC(#op "_" #dst "_" #src) \
371 #op " %" #src ", %" #dst " \n\t" FOP_RET
373 #define FASTOP2(op) \
375 FOP2E(op##b, al, dl) \
376 FOP2E(op##w, ax, dx) \
377 FOP2E(op##l, eax, edx) \
378 ON64(FOP2E(op##q, rax, rdx)) \
381 /* 2 operand, word only */
382 #define FASTOP2W(op) \
385 FOP2E(op##w, ax, dx) \
386 FOP2E(op##l, eax, edx) \
387 ON64(FOP2E(op##q, rax, rdx)) \
390 /* 2 operand, src is CL */
391 #define FASTOP2CL(op) \
393 FOP2E(op##b, al, cl) \
394 FOP2E(op##w, ax, cl) \
395 FOP2E(op##l, eax, cl) \
396 ON64(FOP2E(op##q, rax, cl)) \
399 /* 2 operand, src and dest are reversed */
400 #define FASTOP2R(op, name) \
402 FOP2E(op##b, dl, al) \
403 FOP2E(op##w, dx, ax) \
404 FOP2E(op##l, edx, eax) \
405 ON64(FOP2E(op##q, rdx, rax)) \
408 #define FOP3E(op, dst, src, src2) \
409 FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
410 #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
412 /* 3-operand, word-only, src2=cl */
413 #define FASTOP3WCL(op) \
416 FOP3E(op##w, ax, dx, cl) \
417 FOP3E(op##l, eax, edx, cl) \
418 ON64(FOP3E(op##q, rax, rdx, cl)) \
421 /* Special case for SETcc - 1 instruction per cc */
422 #define FOP_SETCC(op) \
424 ".type " #op ", @function \n\t" \
429 asm(".pushsection .fixup, \"ax\"\n"
430 ".global kvm_fastop_exception \n"
431 "kvm_fastop_exception: xor %esi, %esi; ret\n"
453 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
457 * XXX: inoutclob user must know where the argument is being expanded.
458 * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
460 #define asm_safe(insn, inoutclob...) \
464 asm volatile("1:" insn "\n" \
466 ".pushsection .fixup, \"ax\"\n" \
467 "3: movl $1, %[_fault]\n" \
470 _ASM_EXTABLE(1b, 3b) \
471 : [_fault] "+qm"(_fault) inoutclob ); \
473 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
476 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
477 enum x86_intercept intercept,
478 enum x86_intercept_stage stage)
480 struct x86_instruction_info info = {
481 .intercept = intercept,
482 .rep_prefix = ctxt->rep_prefix,
483 .modrm_mod = ctxt->modrm_mod,
484 .modrm_reg = ctxt->modrm_reg,
485 .modrm_rm = ctxt->modrm_rm,
486 .src_val = ctxt->src.val64,
487 .dst_val = ctxt->dst.val64,
488 .src_bytes = ctxt->src.bytes,
489 .dst_bytes = ctxt->dst.bytes,
490 .ad_bytes = ctxt->ad_bytes,
491 .next_rip = ctxt->eip,
494 return ctxt->ops->intercept(ctxt, &info, stage);
497 static void assign_masked(ulong *dest, ulong src, ulong mask)
499 *dest = (*dest & ~mask) | (src & mask);
502 static void assign_register(unsigned long *reg, u64 val, int bytes)
504 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
507 *(u8 *)reg = (u8)val;
510 *(u16 *)reg = (u16)val;
514 break; /* 64b: zero-extend */
521 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
523 return (1UL << (ctxt->ad_bytes << 3)) - 1;
526 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
529 struct desc_struct ss;
531 if (ctxt->mode == X86EMUL_MODE_PROT64)
533 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
534 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
537 static int stack_size(struct x86_emulate_ctxt *ctxt)
539 return (__fls(stack_mask(ctxt)) + 1) >> 3;
542 /* Access/update address held in a register, based on addressing mode. */
543 static inline unsigned long
544 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
546 if (ctxt->ad_bytes == sizeof(unsigned long))
549 return reg & ad_mask(ctxt);
552 static inline unsigned long
553 register_address(struct x86_emulate_ctxt *ctxt, int reg)
555 return address_mask(ctxt, reg_read(ctxt, reg));
558 static void masked_increment(ulong *reg, ulong mask, int inc)
560 assign_masked(reg, *reg + inc, mask);
564 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
566 ulong *preg = reg_rmw(ctxt, reg);
568 assign_register(preg, *preg + inc, ctxt->ad_bytes);
571 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
573 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
576 static u32 desc_limit_scaled(struct desc_struct *desc)
578 u32 limit = get_desc_limit(desc);
580 return desc->g ? (limit << 12) | 0xfff : limit;
583 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
585 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
588 return ctxt->ops->get_cached_segment_base(ctxt, seg);
591 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
592 u32 error, bool valid)
595 ctxt->exception.vector = vec;
596 ctxt->exception.error_code = error;
597 ctxt->exception.error_code_valid = valid;
598 return X86EMUL_PROPAGATE_FAULT;
601 static int emulate_db(struct x86_emulate_ctxt *ctxt)
603 return emulate_exception(ctxt, DB_VECTOR, 0, false);
606 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
608 return emulate_exception(ctxt, GP_VECTOR, err, true);
611 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
613 return emulate_exception(ctxt, SS_VECTOR, err, true);
616 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
618 return emulate_exception(ctxt, UD_VECTOR, 0, false);
621 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
623 return emulate_exception(ctxt, TS_VECTOR, err, true);
626 static int emulate_de(struct x86_emulate_ctxt *ctxt)
628 return emulate_exception(ctxt, DE_VECTOR, 0, false);
631 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
633 return emulate_exception(ctxt, NM_VECTOR, 0, false);
636 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
639 struct desc_struct desc;
641 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
645 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
650 struct desc_struct desc;
652 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
653 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
657 * x86 defines three classes of vector instructions: explicitly
658 * aligned, explicitly unaligned, and the rest, which change behaviour
659 * depending on whether they're AVX encoded or not.
661 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
662 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
663 * 512 bytes of data must be aligned to a 16 byte boundary.
665 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
667 u64 alignment = ctxt->d & AlignMask;
669 if (likely(size < 16))
684 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
685 struct segmented_address addr,
686 unsigned *max_size, unsigned size,
687 bool write, bool fetch,
688 enum x86emul_mode mode, ulong *linear)
690 struct desc_struct desc;
697 la = seg_base(ctxt, addr.seg) + addr.ea;
700 case X86EMUL_MODE_PROT64:
702 va_bits = ctxt_virt_addr_bits(ctxt);
703 if (get_canonical(la, va_bits) != la)
706 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
707 if (size > *max_size)
711 *linear = la = (u32)la;
712 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
716 /* code segment in protected mode or read-only data segment */
717 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
718 || !(desc.type & 2)) && write)
720 /* unreadable code segment */
721 if (!fetch && (desc.type & 8) && !(desc.type & 2))
723 lim = desc_limit_scaled(&desc);
724 if (!(desc.type & 8) && (desc.type & 4)) {
725 /* expand-down segment */
728 lim = desc.d ? 0xffffffff : 0xffff;
732 if (lim == 0xffffffff)
735 *max_size = (u64)lim + 1 - addr.ea;
736 if (size > *max_size)
741 if (la & (insn_alignment(ctxt, size) - 1))
742 return emulate_gp(ctxt, 0);
743 return X86EMUL_CONTINUE;
745 if (addr.seg == VCPU_SREG_SS)
746 return emulate_ss(ctxt, 0);
748 return emulate_gp(ctxt, 0);
751 static int linearize(struct x86_emulate_ctxt *ctxt,
752 struct segmented_address addr,
753 unsigned size, bool write,
757 return __linearize(ctxt, addr, &max_size, size, write, false,
761 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
762 enum x86emul_mode mode)
767 struct segmented_address addr = { .seg = VCPU_SREG_CS,
770 if (ctxt->op_bytes != sizeof(unsigned long))
771 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
772 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
773 if (rc == X86EMUL_CONTINUE)
774 ctxt->_eip = addr.ea;
778 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
780 return assign_eip(ctxt, dst, ctxt->mode);
783 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
784 const struct desc_struct *cs_desc)
786 enum x86emul_mode mode = ctxt->mode;
790 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
794 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
796 mode = X86EMUL_MODE_PROT64;
798 mode = X86EMUL_MODE_PROT32; /* temporary value */
801 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
802 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
803 rc = assign_eip(ctxt, dst, mode);
804 if (rc == X86EMUL_CONTINUE)
809 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
811 return assign_eip_near(ctxt, ctxt->_eip + rel);
814 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
815 struct segmented_address addr,
822 rc = linearize(ctxt, addr, size, false, &linear);
823 if (rc != X86EMUL_CONTINUE)
825 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
828 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
829 struct segmented_address addr,
836 rc = linearize(ctxt, addr, size, true, &linear);
837 if (rc != X86EMUL_CONTINUE)
839 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
843 * Prefetch the remaining bytes of the instruction without crossing page
844 * boundary if they are not in fetch_cache yet.
846 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
849 unsigned size, max_size;
850 unsigned long linear;
851 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
852 struct segmented_address addr = { .seg = VCPU_SREG_CS,
853 .ea = ctxt->eip + cur_size };
856 * We do not know exactly how many bytes will be needed, and
857 * __linearize is expensive, so fetch as much as possible. We
858 * just have to avoid going beyond the 15 byte limit, the end
859 * of the segment, or the end of the page.
861 * __linearize is called with size 0 so that it does not do any
862 * boundary check itself. Instead, we use max_size to check
865 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
867 if (unlikely(rc != X86EMUL_CONTINUE))
870 size = min_t(unsigned, 15UL ^ cur_size, max_size);
871 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
874 * One instruction can only straddle two pages,
875 * and one has been loaded at the beginning of
876 * x86_decode_insn. So, if not enough bytes
877 * still, we must have hit the 15-byte boundary.
879 if (unlikely(size < op_size))
880 return emulate_gp(ctxt, 0);
882 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
883 size, &ctxt->exception);
884 if (unlikely(rc != X86EMUL_CONTINUE))
886 ctxt->fetch.end += size;
887 return X86EMUL_CONTINUE;
890 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
893 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
895 if (unlikely(done_size < size))
896 return __do_insn_fetch_bytes(ctxt, size - done_size);
898 return X86EMUL_CONTINUE;
901 /* Fetch next part of the instruction being emulated. */
902 #define insn_fetch(_type, _ctxt) \
905 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
906 if (rc != X86EMUL_CONTINUE) \
908 ctxt->_eip += sizeof(_type); \
909 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
910 ctxt->fetch.ptr += sizeof(_type); \
914 #define insn_fetch_arr(_arr, _size, _ctxt) \
916 rc = do_insn_fetch_bytes(_ctxt, _size); \
917 if (rc != X86EMUL_CONTINUE) \
919 ctxt->_eip += (_size); \
920 memcpy(_arr, ctxt->fetch.ptr, _size); \
921 ctxt->fetch.ptr += (_size); \
925 * Given the 'reg' portion of a ModRM byte, and a register block, return a
926 * pointer into the block that addresses the relevant register.
927 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
929 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
933 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
935 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
936 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
938 p = reg_rmw(ctxt, modrm_reg);
942 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
943 struct segmented_address addr,
944 u16 *size, unsigned long *address, int op_bytes)
951 rc = segmented_read_std(ctxt, addr, size, 2);
952 if (rc != X86EMUL_CONTINUE)
955 rc = segmented_read_std(ctxt, addr, address, op_bytes);
969 FASTOP1SRC2(mul, mul_ex);
970 FASTOP1SRC2(imul, imul_ex);
971 FASTOP1SRC2EX(div, div_ex);
972 FASTOP1SRC2EX(idiv, idiv_ex);
1001 FASTOP2R(cmp, cmp_r);
1003 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1005 /* If src is zero, do not writeback, but update flags */
1006 if (ctxt->src.val == 0)
1007 ctxt->dst.type = OP_NONE;
1008 return fastop(ctxt, em_bsf);
1011 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1013 /* If src is zero, do not writeback, but update flags */
1014 if (ctxt->src.val == 0)
1015 ctxt->dst.type = OP_NONE;
1016 return fastop(ctxt, em_bsr);
1019 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1022 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1024 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1025 asm("push %[flags]; popf; " CALL_NOSPEC
1026 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1030 static void fetch_register_operand(struct operand *op)
1032 switch (op->bytes) {
1034 op->val = *(u8 *)op->addr.reg;
1037 op->val = *(u16 *)op->addr.reg;
1040 op->val = *(u32 *)op->addr.reg;
1043 op->val = *(u64 *)op->addr.reg;
1048 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1051 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1052 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1053 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1054 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1055 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1056 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1057 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1058 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1059 #ifdef CONFIG_X86_64
1060 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1061 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1062 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1063 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1064 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1065 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1066 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1067 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1073 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1077 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1078 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1079 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1080 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1081 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1082 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1083 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1084 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1085 #ifdef CONFIG_X86_64
1086 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1087 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1088 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1089 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1090 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1091 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1092 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1093 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1099 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1102 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1103 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1104 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1105 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1106 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1107 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1108 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1109 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1114 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1117 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1118 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1119 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1120 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1121 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1122 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1123 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1124 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1129 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1131 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1132 return emulate_nm(ctxt);
1134 asm volatile("fninit");
1135 return X86EMUL_CONTINUE;
1138 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1142 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1143 return emulate_nm(ctxt);
1145 asm volatile("fnstcw %0": "+m"(fcw));
1147 ctxt->dst.val = fcw;
1149 return X86EMUL_CONTINUE;
1152 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1156 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1157 return emulate_nm(ctxt);
1159 asm volatile("fnstsw %0": "+m"(fsw));
1161 ctxt->dst.val = fsw;
1163 return X86EMUL_CONTINUE;
1166 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1169 unsigned reg = ctxt->modrm_reg;
1171 if (!(ctxt->d & ModRM))
1172 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1174 if (ctxt->d & Sse) {
1178 read_sse_reg(ctxt, &op->vec_val, reg);
1181 if (ctxt->d & Mmx) {
1190 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1191 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1193 fetch_register_operand(op);
1194 op->orig_val = op->val;
1197 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1199 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1200 ctxt->modrm_seg = VCPU_SREG_SS;
1203 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1207 int index_reg, base_reg, scale;
1208 int rc = X86EMUL_CONTINUE;
1211 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1212 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1213 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1215 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1216 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1217 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1218 ctxt->modrm_seg = VCPU_SREG_DS;
1220 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1222 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1223 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1225 if (ctxt->d & Sse) {
1228 op->addr.xmm = ctxt->modrm_rm;
1229 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1232 if (ctxt->d & Mmx) {
1235 op->addr.mm = ctxt->modrm_rm & 7;
1238 fetch_register_operand(op);
1244 if (ctxt->ad_bytes == 2) {
1245 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1246 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1247 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1248 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1250 /* 16-bit ModR/M decode. */
1251 switch (ctxt->modrm_mod) {
1253 if (ctxt->modrm_rm == 6)
1254 modrm_ea += insn_fetch(u16, ctxt);
1257 modrm_ea += insn_fetch(s8, ctxt);
1260 modrm_ea += insn_fetch(u16, ctxt);
1263 switch (ctxt->modrm_rm) {
1265 modrm_ea += bx + si;
1268 modrm_ea += bx + di;
1271 modrm_ea += bp + si;
1274 modrm_ea += bp + di;
1283 if (ctxt->modrm_mod != 0)
1290 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1291 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1292 ctxt->modrm_seg = VCPU_SREG_SS;
1293 modrm_ea = (u16)modrm_ea;
1295 /* 32/64-bit ModR/M decode. */
1296 if ((ctxt->modrm_rm & 7) == 4) {
1297 sib = insn_fetch(u8, ctxt);
1298 index_reg |= (sib >> 3) & 7;
1299 base_reg |= sib & 7;
1302 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1303 modrm_ea += insn_fetch(s32, ctxt);
1305 modrm_ea += reg_read(ctxt, base_reg);
1306 adjust_modrm_seg(ctxt, base_reg);
1307 /* Increment ESP on POP [ESP] */
1308 if ((ctxt->d & IncSP) &&
1309 base_reg == VCPU_REGS_RSP)
1310 modrm_ea += ctxt->op_bytes;
1313 modrm_ea += reg_read(ctxt, index_reg) << scale;
1314 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1315 modrm_ea += insn_fetch(s32, ctxt);
1316 if (ctxt->mode == X86EMUL_MODE_PROT64)
1317 ctxt->rip_relative = 1;
1319 base_reg = ctxt->modrm_rm;
1320 modrm_ea += reg_read(ctxt, base_reg);
1321 adjust_modrm_seg(ctxt, base_reg);
1323 switch (ctxt->modrm_mod) {
1325 modrm_ea += insn_fetch(s8, ctxt);
1328 modrm_ea += insn_fetch(s32, ctxt);
1332 op->addr.mem.ea = modrm_ea;
1333 if (ctxt->ad_bytes != 8)
1334 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1340 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1343 int rc = X86EMUL_CONTINUE;
1346 switch (ctxt->ad_bytes) {
1348 op->addr.mem.ea = insn_fetch(u16, ctxt);
1351 op->addr.mem.ea = insn_fetch(u32, ctxt);
1354 op->addr.mem.ea = insn_fetch(u64, ctxt);
1361 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1365 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1366 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1368 if (ctxt->src.bytes == 2)
1369 sv = (s16)ctxt->src.val & (s16)mask;
1370 else if (ctxt->src.bytes == 4)
1371 sv = (s32)ctxt->src.val & (s32)mask;
1373 sv = (s64)ctxt->src.val & (s64)mask;
1375 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1376 ctxt->dst.addr.mem.ea + (sv >> 3));
1379 /* only subword offset */
1380 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1383 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1384 unsigned long addr, void *dest, unsigned size)
1387 struct read_cache *mc = &ctxt->mem_read;
1389 if (mc->pos < mc->end)
1392 WARN_ON((mc->end + size) >= sizeof(mc->data));
1394 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1396 if (rc != X86EMUL_CONTINUE)
1402 memcpy(dest, mc->data + mc->pos, size);
1404 return X86EMUL_CONTINUE;
1407 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1408 struct segmented_address addr,
1415 rc = linearize(ctxt, addr, size, false, &linear);
1416 if (rc != X86EMUL_CONTINUE)
1418 return read_emulated(ctxt, linear, data, size);
1421 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1422 struct segmented_address addr,
1429 rc = linearize(ctxt, addr, size, true, &linear);
1430 if (rc != X86EMUL_CONTINUE)
1432 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1436 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1437 struct segmented_address addr,
1438 const void *orig_data, const void *data,
1444 rc = linearize(ctxt, addr, size, true, &linear);
1445 if (rc != X86EMUL_CONTINUE)
1447 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1448 size, &ctxt->exception);
1451 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1452 unsigned int size, unsigned short port,
1455 struct read_cache *rc = &ctxt->io_read;
1457 if (rc->pos == rc->end) { /* refill pio read ahead */
1458 unsigned int in_page, n;
1459 unsigned int count = ctxt->rep_prefix ?
1460 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1461 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1462 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1463 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1464 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1467 rc->pos = rc->end = 0;
1468 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1473 if (ctxt->rep_prefix && (ctxt->d & String) &&
1474 !(ctxt->eflags & X86_EFLAGS_DF)) {
1475 ctxt->dst.data = rc->data + rc->pos;
1476 ctxt->dst.type = OP_MEM_STR;
1477 ctxt->dst.count = (rc->end - rc->pos) / size;
1480 memcpy(dest, rc->data + rc->pos, size);
1486 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1487 u16 index, struct desc_struct *desc)
1492 ctxt->ops->get_idt(ctxt, &dt);
1494 if (dt.size < index * 8 + 7)
1495 return emulate_gp(ctxt, index << 3 | 0x2);
1497 addr = dt.address + index * 8;
1498 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1502 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1503 u16 selector, struct desc_ptr *dt)
1505 const struct x86_emulate_ops *ops = ctxt->ops;
1508 if (selector & 1 << 2) {
1509 struct desc_struct desc;
1512 memset (dt, 0, sizeof *dt);
1513 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1517 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1518 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1520 ops->get_gdt(ctxt, dt);
1523 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1524 u16 selector, ulong *desc_addr_p)
1527 u16 index = selector >> 3;
1530 get_descriptor_table_ptr(ctxt, selector, &dt);
1532 if (dt.size < index * 8 + 7)
1533 return emulate_gp(ctxt, selector & 0xfffc);
1535 addr = dt.address + index * 8;
1537 #ifdef CONFIG_X86_64
1538 if (addr >> 32 != 0) {
1541 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1542 if (!(efer & EFER_LMA))
1547 *desc_addr_p = addr;
1548 return X86EMUL_CONTINUE;
1551 /* allowed just for 8 bytes segments */
1552 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1553 u16 selector, struct desc_struct *desc,
1558 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1559 if (rc != X86EMUL_CONTINUE)
1562 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1566 /* allowed just for 8 bytes segments */
1567 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1568 u16 selector, struct desc_struct *desc)
1573 rc = get_descriptor_ptr(ctxt, selector, &addr);
1574 if (rc != X86EMUL_CONTINUE)
1577 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1581 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1582 u16 selector, int seg, u8 cpl,
1583 enum x86_transfer_type transfer,
1584 struct desc_struct *desc)
1586 struct desc_struct seg_desc, old_desc;
1588 unsigned err_vec = GP_VECTOR;
1590 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1596 memset(&seg_desc, 0, sizeof seg_desc);
1598 if (ctxt->mode == X86EMUL_MODE_REAL) {
1599 /* set real mode segment descriptor (keep limit etc. for
1601 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1602 set_desc_base(&seg_desc, selector << 4);
1604 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1605 /* VM86 needs a clean new segment descriptor */
1606 set_desc_base(&seg_desc, selector << 4);
1607 set_desc_limit(&seg_desc, 0xffff);
1617 /* TR should be in GDT only */
1618 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1621 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1622 if (null_selector) {
1623 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1626 if (seg == VCPU_SREG_SS) {
1627 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1631 * ctxt->ops->set_segment expects the CPL to be in
1632 * SS.DPL, so fake an expand-up 32-bit data segment.
1642 /* Skip all following checks */
1646 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1647 if (ret != X86EMUL_CONTINUE)
1650 err_code = selector & 0xfffc;
1651 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1654 /* can't load system descriptor into segment selector */
1655 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1656 if (transfer == X86_TRANSFER_CALL_JMP)
1657 return X86EMUL_UNHANDLEABLE;
1662 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1671 * segment is not a writable data segment or segment
1672 * selector's RPL != CPL or segment selector's RPL != CPL
1674 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1678 if (!(seg_desc.type & 8))
1681 if (seg_desc.type & 4) {
1687 if (rpl > cpl || dpl != cpl)
1690 /* in long-mode d/b must be clear if l is set */
1691 if (seg_desc.d && seg_desc.l) {
1694 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1695 if (efer & EFER_LMA)
1699 /* CS(RPL) <- CPL */
1700 selector = (selector & 0xfffc) | cpl;
1703 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1705 old_desc = seg_desc;
1706 seg_desc.type |= 2; /* busy */
1707 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1708 sizeof(seg_desc), &ctxt->exception);
1709 if (ret != X86EMUL_CONTINUE)
1712 case VCPU_SREG_LDTR:
1713 if (seg_desc.s || seg_desc.type != 2)
1716 default: /* DS, ES, FS, or GS */
1718 * segment is not a data or readable code segment or
1719 * ((segment is a data or nonconforming code segment)
1720 * and (both RPL and CPL > DPL))
1722 if ((seg_desc.type & 0xa) == 0x8 ||
1723 (((seg_desc.type & 0xc) != 0xc) &&
1724 (rpl > dpl && cpl > dpl)))
1730 /* mark segment as accessed */
1731 if (!(seg_desc.type & 1)) {
1733 ret = write_segment_descriptor(ctxt, selector,
1735 if (ret != X86EMUL_CONTINUE)
1738 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1739 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1740 sizeof(base3), &ctxt->exception);
1741 if (ret != X86EMUL_CONTINUE)
1743 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1744 ((u64)base3 << 32), ctxt))
1745 return emulate_gp(ctxt, 0);
1748 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1751 return X86EMUL_CONTINUE;
1753 return emulate_exception(ctxt, err_vec, err_code, true);
1756 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1757 u16 selector, int seg)
1759 u8 cpl = ctxt->ops->cpl(ctxt);
1762 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1763 * they can load it at CPL<3 (Intel's manual says only LSS can,
1766 * However, the Intel manual says that putting IST=1/DPL=3 in
1767 * an interrupt gate will result in SS=3 (the AMD manual instead
1768 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1769 * and only forbid it here.
1771 if (seg == VCPU_SREG_SS && selector == 3 &&
1772 ctxt->mode == X86EMUL_MODE_PROT64)
1773 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1775 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1776 X86_TRANSFER_NONE, NULL);
1779 static void write_register_operand(struct operand *op)
1781 return assign_register(op->addr.reg, op->val, op->bytes);
1784 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1788 write_register_operand(op);
1791 if (ctxt->lock_prefix)
1792 return segmented_cmpxchg(ctxt,
1798 return segmented_write(ctxt,
1804 return segmented_write(ctxt,
1807 op->bytes * op->count);
1810 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1813 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1821 return X86EMUL_CONTINUE;
1824 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1826 struct segmented_address addr;
1828 rsp_increment(ctxt, -bytes);
1829 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1830 addr.seg = VCPU_SREG_SS;
1832 return segmented_write(ctxt, addr, data, bytes);
1835 static int em_push(struct x86_emulate_ctxt *ctxt)
1837 /* Disable writeback. */
1838 ctxt->dst.type = OP_NONE;
1839 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1842 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1843 void *dest, int len)
1846 struct segmented_address addr;
1848 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1849 addr.seg = VCPU_SREG_SS;
1850 rc = segmented_read(ctxt, addr, dest, len);
1851 if (rc != X86EMUL_CONTINUE)
1854 rsp_increment(ctxt, len);
1858 static int em_pop(struct x86_emulate_ctxt *ctxt)
1860 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1863 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1864 void *dest, int len)
1867 unsigned long val, change_mask;
1868 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1869 int cpl = ctxt->ops->cpl(ctxt);
1871 rc = emulate_pop(ctxt, &val, len);
1872 if (rc != X86EMUL_CONTINUE)
1875 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1876 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1877 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1878 X86_EFLAGS_AC | X86_EFLAGS_ID;
1880 switch(ctxt->mode) {
1881 case X86EMUL_MODE_PROT64:
1882 case X86EMUL_MODE_PROT32:
1883 case X86EMUL_MODE_PROT16:
1885 change_mask |= X86_EFLAGS_IOPL;
1887 change_mask |= X86_EFLAGS_IF;
1889 case X86EMUL_MODE_VM86:
1891 return emulate_gp(ctxt, 0);
1892 change_mask |= X86_EFLAGS_IF;
1894 default: /* real mode */
1895 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1899 *(unsigned long *)dest =
1900 (ctxt->eflags & ~change_mask) | (val & change_mask);
1905 static int em_popf(struct x86_emulate_ctxt *ctxt)
1907 ctxt->dst.type = OP_REG;
1908 ctxt->dst.addr.reg = &ctxt->eflags;
1909 ctxt->dst.bytes = ctxt->op_bytes;
1910 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1913 static int em_enter(struct x86_emulate_ctxt *ctxt)
1916 unsigned frame_size = ctxt->src.val;
1917 unsigned nesting_level = ctxt->src2.val & 31;
1921 return X86EMUL_UNHANDLEABLE;
1923 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1924 rc = push(ctxt, &rbp, stack_size(ctxt));
1925 if (rc != X86EMUL_CONTINUE)
1927 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1929 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1930 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1932 return X86EMUL_CONTINUE;
1935 static int em_leave(struct x86_emulate_ctxt *ctxt)
1937 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1939 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1942 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1944 int seg = ctxt->src2.val;
1946 ctxt->src.val = get_segment_selector(ctxt, seg);
1947 if (ctxt->op_bytes == 4) {
1948 rsp_increment(ctxt, -2);
1952 return em_push(ctxt);
1955 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1957 int seg = ctxt->src2.val;
1958 unsigned long selector;
1961 rc = emulate_pop(ctxt, &selector, 2);
1962 if (rc != X86EMUL_CONTINUE)
1965 if (ctxt->modrm_reg == VCPU_SREG_SS)
1966 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1967 if (ctxt->op_bytes > 2)
1968 rsp_increment(ctxt, ctxt->op_bytes - 2);
1970 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1974 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1976 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1977 int rc = X86EMUL_CONTINUE;
1978 int reg = VCPU_REGS_RAX;
1980 while (reg <= VCPU_REGS_RDI) {
1981 (reg == VCPU_REGS_RSP) ?
1982 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1985 if (rc != X86EMUL_CONTINUE)
1994 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1996 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1997 return em_push(ctxt);
2000 static int em_popa(struct x86_emulate_ctxt *ctxt)
2002 int rc = X86EMUL_CONTINUE;
2003 int reg = VCPU_REGS_RDI;
2006 while (reg >= VCPU_REGS_RAX) {
2007 if (reg == VCPU_REGS_RSP) {
2008 rsp_increment(ctxt, ctxt->op_bytes);
2012 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2013 if (rc != X86EMUL_CONTINUE)
2015 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2021 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2023 const struct x86_emulate_ops *ops = ctxt->ops;
2030 /* TODO: Add limit checks */
2031 ctxt->src.val = ctxt->eflags;
2033 if (rc != X86EMUL_CONTINUE)
2036 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2038 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2040 if (rc != X86EMUL_CONTINUE)
2043 ctxt->src.val = ctxt->_eip;
2045 if (rc != X86EMUL_CONTINUE)
2048 ops->get_idt(ctxt, &dt);
2050 eip_addr = dt.address + (irq << 2);
2051 cs_addr = dt.address + (irq << 2) + 2;
2053 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
2054 if (rc != X86EMUL_CONTINUE)
2057 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
2058 if (rc != X86EMUL_CONTINUE)
2061 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2062 if (rc != X86EMUL_CONTINUE)
2070 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2074 invalidate_registers(ctxt);
2075 rc = __emulate_int_real(ctxt, irq);
2076 if (rc == X86EMUL_CONTINUE)
2077 writeback_registers(ctxt);
2081 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2083 switch(ctxt->mode) {
2084 case X86EMUL_MODE_REAL:
2085 return __emulate_int_real(ctxt, irq);
2086 case X86EMUL_MODE_VM86:
2087 case X86EMUL_MODE_PROT16:
2088 case X86EMUL_MODE_PROT32:
2089 case X86EMUL_MODE_PROT64:
2091 /* Protected mode interrupts unimplemented yet */
2092 return X86EMUL_UNHANDLEABLE;
2096 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2098 int rc = X86EMUL_CONTINUE;
2099 unsigned long temp_eip = 0;
2100 unsigned long temp_eflags = 0;
2101 unsigned long cs = 0;
2102 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2103 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2104 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2105 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2106 X86_EFLAGS_AC | X86_EFLAGS_ID |
2108 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2111 /* TODO: Add stack limit check */
2113 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2115 if (rc != X86EMUL_CONTINUE)
2118 if (temp_eip & ~0xffff)
2119 return emulate_gp(ctxt, 0);
2121 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2123 if (rc != X86EMUL_CONTINUE)
2126 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2128 if (rc != X86EMUL_CONTINUE)
2131 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2133 if (rc != X86EMUL_CONTINUE)
2136 ctxt->_eip = temp_eip;
2138 if (ctxt->op_bytes == 4)
2139 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2140 else if (ctxt->op_bytes == 2) {
2141 ctxt->eflags &= ~0xffff;
2142 ctxt->eflags |= temp_eflags;
2145 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2146 ctxt->eflags |= X86_EFLAGS_FIXED;
2147 ctxt->ops->set_nmi_mask(ctxt, false);
2152 static int em_iret(struct x86_emulate_ctxt *ctxt)
2154 switch(ctxt->mode) {
2155 case X86EMUL_MODE_REAL:
2156 return emulate_iret_real(ctxt);
2157 case X86EMUL_MODE_VM86:
2158 case X86EMUL_MODE_PROT16:
2159 case X86EMUL_MODE_PROT32:
2160 case X86EMUL_MODE_PROT64:
2162 /* iret from protected mode unimplemented yet */
2163 return X86EMUL_UNHANDLEABLE;
2167 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2171 struct desc_struct new_desc;
2172 u8 cpl = ctxt->ops->cpl(ctxt);
2174 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2176 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2177 X86_TRANSFER_CALL_JMP,
2179 if (rc != X86EMUL_CONTINUE)
2182 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2183 /* Error handling is not implemented. */
2184 if (rc != X86EMUL_CONTINUE)
2185 return X86EMUL_UNHANDLEABLE;
2190 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2192 return assign_eip_near(ctxt, ctxt->src.val);
2195 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2200 old_eip = ctxt->_eip;
2201 rc = assign_eip_near(ctxt, ctxt->src.val);
2202 if (rc != X86EMUL_CONTINUE)
2204 ctxt->src.val = old_eip;
2209 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2211 u64 old = ctxt->dst.orig_val64;
2213 if (ctxt->dst.bytes == 16)
2214 return X86EMUL_UNHANDLEABLE;
2216 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2217 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2218 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2219 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2220 ctxt->eflags &= ~X86_EFLAGS_ZF;
2222 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2223 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2225 ctxt->eflags |= X86_EFLAGS_ZF;
2227 return X86EMUL_CONTINUE;
2230 static int em_ret(struct x86_emulate_ctxt *ctxt)
2235 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2236 if (rc != X86EMUL_CONTINUE)
2239 return assign_eip_near(ctxt, eip);
2242 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2245 unsigned long eip, cs;
2246 int cpl = ctxt->ops->cpl(ctxt);
2247 struct desc_struct new_desc;
2249 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2250 if (rc != X86EMUL_CONTINUE)
2252 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2253 if (rc != X86EMUL_CONTINUE)
2255 /* Outer-privilege level return is not implemented */
2256 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2257 return X86EMUL_UNHANDLEABLE;
2258 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2261 if (rc != X86EMUL_CONTINUE)
2263 rc = assign_eip_far(ctxt, eip, &new_desc);
2264 /* Error handling is not implemented. */
2265 if (rc != X86EMUL_CONTINUE)
2266 return X86EMUL_UNHANDLEABLE;
2271 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2275 rc = em_ret_far(ctxt);
2276 if (rc != X86EMUL_CONTINUE)
2278 rsp_increment(ctxt, ctxt->src.val);
2279 return X86EMUL_CONTINUE;
2282 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2284 /* Save real source value, then compare EAX against destination. */
2285 ctxt->dst.orig_val = ctxt->dst.val;
2286 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2287 ctxt->src.orig_val = ctxt->src.val;
2288 ctxt->src.val = ctxt->dst.orig_val;
2289 fastop(ctxt, em_cmp);
2291 if (ctxt->eflags & X86_EFLAGS_ZF) {
2292 /* Success: write back to memory; no update of EAX */
2293 ctxt->src.type = OP_NONE;
2294 ctxt->dst.val = ctxt->src.orig_val;
2296 /* Failure: write the value we saw to EAX. */
2297 ctxt->src.type = OP_REG;
2298 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2299 ctxt->src.val = ctxt->dst.orig_val;
2300 /* Create write-cycle to dest by writing the same value */
2301 ctxt->dst.val = ctxt->dst.orig_val;
2303 return X86EMUL_CONTINUE;
2306 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2308 int seg = ctxt->src2.val;
2312 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2314 rc = load_segment_descriptor(ctxt, sel, seg);
2315 if (rc != X86EMUL_CONTINUE)
2318 ctxt->dst.val = ctxt->src.val;
2322 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2324 u32 eax, ebx, ecx, edx;
2328 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2329 return edx & bit(X86_FEATURE_LM);
2332 #define GET_SMSTATE(type, smbase, offset) \
2335 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
2337 if (r != X86EMUL_CONTINUE) \
2338 return X86EMUL_UNHANDLEABLE; \
2342 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2344 desc->g = (flags >> 23) & 1;
2345 desc->d = (flags >> 22) & 1;
2346 desc->l = (flags >> 21) & 1;
2347 desc->avl = (flags >> 20) & 1;
2348 desc->p = (flags >> 15) & 1;
2349 desc->dpl = (flags >> 13) & 3;
2350 desc->s = (flags >> 12) & 1;
2351 desc->type = (flags >> 8) & 15;
2354 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2356 struct desc_struct desc;
2360 selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2363 offset = 0x7f84 + n * 12;
2365 offset = 0x7f2c + (n - 3) * 12;
2367 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2368 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2369 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2370 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2371 return X86EMUL_CONTINUE;
2374 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2376 struct desc_struct desc;
2381 offset = 0x7e00 + n * 16;
2383 selector = GET_SMSTATE(u16, smbase, offset);
2384 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2385 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, offset + 4));
2386 set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
2387 base3 = GET_SMSTATE(u32, smbase, offset + 12);
2389 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2390 return X86EMUL_CONTINUE;
2393 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2394 u64 cr0, u64 cr3, u64 cr4)
2399 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2401 if (cr4 & X86_CR4_PCIDE) {
2406 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2408 return X86EMUL_UNHANDLEABLE;
2411 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2412 * Then enable protected mode. However, PCID cannot be enabled
2413 * if EFER.LMA=0, so set it separately.
2415 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2417 return X86EMUL_UNHANDLEABLE;
2419 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2421 return X86EMUL_UNHANDLEABLE;
2423 if (cr4 & X86_CR4_PCIDE) {
2424 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2426 return X86EMUL_UNHANDLEABLE;
2428 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2430 return X86EMUL_UNHANDLEABLE;
2435 return X86EMUL_CONTINUE;
2438 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2440 struct desc_struct desc;
2443 u32 val, cr0, cr3, cr4;
2446 cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
2447 cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
2448 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2449 ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
2451 for (i = 0; i < 8; i++)
2452 *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2454 val = GET_SMSTATE(u32, smbase, 0x7fcc);
2455 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2456 val = GET_SMSTATE(u32, smbase, 0x7fc8);
2457 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2459 selector = GET_SMSTATE(u32, smbase, 0x7fc4);
2460 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f64));
2461 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f60));
2462 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f5c));
2463 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2465 selector = GET_SMSTATE(u32, smbase, 0x7fc0);
2466 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7f80));
2467 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7f7c));
2468 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7f78));
2469 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2471 dt.address = GET_SMSTATE(u32, smbase, 0x7f74);
2472 dt.size = GET_SMSTATE(u32, smbase, 0x7f70);
2473 ctxt->ops->set_gdt(ctxt, &dt);
2475 dt.address = GET_SMSTATE(u32, smbase, 0x7f58);
2476 dt.size = GET_SMSTATE(u32, smbase, 0x7f54);
2477 ctxt->ops->set_idt(ctxt, &dt);
2479 for (i = 0; i < 6; i++) {
2480 int r = rsm_load_seg_32(ctxt, smbase, i);
2481 if (r != X86EMUL_CONTINUE)
2485 cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2487 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2489 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2492 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2494 struct desc_struct desc;
2496 u64 val, cr0, cr3, cr4;
2501 for (i = 0; i < 16; i++)
2502 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2504 ctxt->_eip = GET_SMSTATE(u64, smbase, 0x7f78);
2505 ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2507 val = GET_SMSTATE(u32, smbase, 0x7f68);
2508 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2509 val = GET_SMSTATE(u32, smbase, 0x7f60);
2510 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2512 cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
2513 cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
2514 cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
2515 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2516 val = GET_SMSTATE(u64, smbase, 0x7ed0);
2517 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2519 selector = GET_SMSTATE(u32, smbase, 0x7e90);
2520 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2521 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e94));
2522 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e98));
2523 base3 = GET_SMSTATE(u32, smbase, 0x7e9c);
2524 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2526 dt.size = GET_SMSTATE(u32, smbase, 0x7e84);
2527 dt.address = GET_SMSTATE(u64, smbase, 0x7e88);
2528 ctxt->ops->set_idt(ctxt, &dt);
2530 selector = GET_SMSTATE(u32, smbase, 0x7e70);
2531 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2532 set_desc_limit(&desc, GET_SMSTATE(u32, smbase, 0x7e74));
2533 set_desc_base(&desc, GET_SMSTATE(u32, smbase, 0x7e78));
2534 base3 = GET_SMSTATE(u32, smbase, 0x7e7c);
2535 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2537 dt.size = GET_SMSTATE(u32, smbase, 0x7e64);
2538 dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
2539 ctxt->ops->set_gdt(ctxt, &dt);
2541 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2542 if (r != X86EMUL_CONTINUE)
2545 for (i = 0; i < 6; i++) {
2546 r = rsm_load_seg_64(ctxt, smbase, i);
2547 if (r != X86EMUL_CONTINUE)
2551 return X86EMUL_CONTINUE;
2554 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2556 unsigned long cr0, cr4, efer;
2560 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2561 return emulate_ud(ctxt);
2564 * Get back to real mode, to prepare a safe state in which to load
2565 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2566 * supports long mode.
2568 cr4 = ctxt->ops->get_cr(ctxt, 4);
2569 if (emulator_has_longmode(ctxt)) {
2570 struct desc_struct cs_desc;
2572 /* Zero CR4.PCIDE before CR0.PG. */
2573 if (cr4 & X86_CR4_PCIDE) {
2574 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2575 cr4 &= ~X86_CR4_PCIDE;
2578 /* A 32-bit code segment is required to clear EFER.LMA. */
2579 memset(&cs_desc, 0, sizeof(cs_desc));
2581 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2582 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2585 /* For the 64-bit case, this will clear EFER.LMA. */
2586 cr0 = ctxt->ops->get_cr(ctxt, 0);
2587 if (cr0 & X86_CR0_PE)
2588 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2590 /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
2591 if (cr4 & X86_CR4_PAE)
2592 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2594 /* And finally go back to 32-bit mode. */
2596 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2598 smbase = ctxt->ops->get_smbase(ctxt);
2601 * Give pre_leave_smm() a chance to make ISA-specific changes to the
2602 * vCPU state (e.g. enter guest mode) before loading state from the SMM
2605 if (ctxt->ops->pre_leave_smm(ctxt, smbase))
2606 return X86EMUL_UNHANDLEABLE;
2608 if (emulator_has_longmode(ctxt))
2609 ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2611 ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2613 if (ret != X86EMUL_CONTINUE) {
2614 /* FIXME: should triple fault */
2615 return X86EMUL_UNHANDLEABLE;
2618 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2619 ctxt->ops->set_nmi_mask(ctxt, false);
2621 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2622 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2623 return X86EMUL_CONTINUE;
2627 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2628 struct desc_struct *cs, struct desc_struct *ss)
2630 cs->l = 0; /* will be adjusted later */
2631 set_desc_base(cs, 0); /* flat segment */
2632 cs->g = 1; /* 4kb granularity */
2633 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2634 cs->type = 0x0b; /* Read, Execute, Accessed */
2636 cs->dpl = 0; /* will be adjusted later */
2641 set_desc_base(ss, 0); /* flat segment */
2642 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2643 ss->g = 1; /* 4kb granularity */
2645 ss->type = 0x03; /* Read/Write, Accessed */
2646 ss->d = 1; /* 32bit stack segment */
2653 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2655 u32 eax, ebx, ecx, edx;
2658 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2659 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2660 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2661 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2664 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2666 const struct x86_emulate_ops *ops = ctxt->ops;
2667 u32 eax, ebx, ecx, edx;
2670 * syscall should always be enabled in longmode - so only become
2671 * vendor specific (cpuid) if other modes are active...
2673 if (ctxt->mode == X86EMUL_MODE_PROT64)
2678 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2680 * Intel ("GenuineIntel")
2681 * remark: Intel CPUs only support "syscall" in 64bit
2682 * longmode. Also an 64bit guest with a
2683 * 32bit compat-app running will #UD !! While this
2684 * behaviour can be fixed (by emulating) into AMD
2685 * response - CPUs of AMD can't behave like Intel.
2687 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2688 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2689 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2692 /* AMD ("AuthenticAMD") */
2693 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2694 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2695 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2698 /* AMD ("AMDisbetter!") */
2699 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2700 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2701 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2704 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2708 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2710 const struct x86_emulate_ops *ops = ctxt->ops;
2711 struct desc_struct cs, ss;
2716 /* syscall is not available in real mode */
2717 if (ctxt->mode == X86EMUL_MODE_REAL ||
2718 ctxt->mode == X86EMUL_MODE_VM86)
2719 return emulate_ud(ctxt);
2721 if (!(em_syscall_is_enabled(ctxt)))
2722 return emulate_ud(ctxt);
2724 ops->get_msr(ctxt, MSR_EFER, &efer);
2725 setup_syscalls_segments(ctxt, &cs, &ss);
2727 if (!(efer & EFER_SCE))
2728 return emulate_ud(ctxt);
2730 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2732 cs_sel = (u16)(msr_data & 0xfffc);
2733 ss_sel = (u16)(msr_data + 8);
2735 if (efer & EFER_LMA) {
2739 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2740 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2742 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2743 if (efer & EFER_LMA) {
2744 #ifdef CONFIG_X86_64
2745 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2748 ctxt->mode == X86EMUL_MODE_PROT64 ?
2749 MSR_LSTAR : MSR_CSTAR, &msr_data);
2750 ctxt->_eip = msr_data;
2752 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2753 ctxt->eflags &= ~msr_data;
2754 ctxt->eflags |= X86_EFLAGS_FIXED;
2758 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2759 ctxt->_eip = (u32)msr_data;
2761 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2764 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2765 return X86EMUL_CONTINUE;
2768 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2770 const struct x86_emulate_ops *ops = ctxt->ops;
2771 struct desc_struct cs, ss;
2776 ops->get_msr(ctxt, MSR_EFER, &efer);
2777 /* inject #GP if in real mode */
2778 if (ctxt->mode == X86EMUL_MODE_REAL)
2779 return emulate_gp(ctxt, 0);
2782 * Not recognized on AMD in compat mode (but is recognized in legacy
2785 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2786 && !vendor_intel(ctxt))
2787 return emulate_ud(ctxt);
2789 /* sysenter/sysexit have not been tested in 64bit mode. */
2790 if (ctxt->mode == X86EMUL_MODE_PROT64)
2791 return X86EMUL_UNHANDLEABLE;
2793 setup_syscalls_segments(ctxt, &cs, &ss);
2795 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2796 if ((msr_data & 0xfffc) == 0x0)
2797 return emulate_gp(ctxt, 0);
2799 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2800 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2801 ss_sel = cs_sel + 8;
2802 if (efer & EFER_LMA) {
2807 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2808 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2810 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2811 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2813 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2814 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2817 return X86EMUL_CONTINUE;
2820 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2822 const struct x86_emulate_ops *ops = ctxt->ops;
2823 struct desc_struct cs, ss;
2824 u64 msr_data, rcx, rdx;
2826 u16 cs_sel = 0, ss_sel = 0;
2828 /* inject #GP if in real mode or Virtual 8086 mode */
2829 if (ctxt->mode == X86EMUL_MODE_REAL ||
2830 ctxt->mode == X86EMUL_MODE_VM86)
2831 return emulate_gp(ctxt, 0);
2833 setup_syscalls_segments(ctxt, &cs, &ss);
2835 if ((ctxt->rex_prefix & 0x8) != 0x0)
2836 usermode = X86EMUL_MODE_PROT64;
2838 usermode = X86EMUL_MODE_PROT32;
2840 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2841 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2845 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2847 case X86EMUL_MODE_PROT32:
2848 cs_sel = (u16)(msr_data + 16);
2849 if ((msr_data & 0xfffc) == 0x0)
2850 return emulate_gp(ctxt, 0);
2851 ss_sel = (u16)(msr_data + 24);
2855 case X86EMUL_MODE_PROT64:
2856 cs_sel = (u16)(msr_data + 32);
2857 if (msr_data == 0x0)
2858 return emulate_gp(ctxt, 0);
2859 ss_sel = cs_sel + 8;
2862 if (emul_is_noncanonical_address(rcx, ctxt) ||
2863 emul_is_noncanonical_address(rdx, ctxt))
2864 return emulate_gp(ctxt, 0);
2867 cs_sel |= SEGMENT_RPL_MASK;
2868 ss_sel |= SEGMENT_RPL_MASK;
2870 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2871 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2874 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2876 return X86EMUL_CONTINUE;
2879 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2882 if (ctxt->mode == X86EMUL_MODE_REAL)
2884 if (ctxt->mode == X86EMUL_MODE_VM86)
2886 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2887 return ctxt->ops->cpl(ctxt) > iopl;
2890 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2893 const struct x86_emulate_ops *ops = ctxt->ops;
2894 struct desc_struct tr_seg;
2897 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2898 unsigned mask = (1 << len) - 1;
2901 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2904 if (desc_limit_scaled(&tr_seg) < 103)
2906 base = get_desc_base(&tr_seg);
2907 #ifdef CONFIG_X86_64
2908 base |= ((u64)base3) << 32;
2910 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2911 if (r != X86EMUL_CONTINUE)
2913 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2915 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2916 if (r != X86EMUL_CONTINUE)
2918 if ((perm >> bit_idx) & mask)
2923 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2929 if (emulator_bad_iopl(ctxt))
2930 if (!emulator_io_port_access_allowed(ctxt, port, len))
2933 ctxt->perm_ok = true;
2938 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2941 * Intel CPUs mask the counter and pointers in quite strange
2942 * manner when ECX is zero due to REP-string optimizations.
2944 #ifdef CONFIG_X86_64
2945 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2948 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2951 case 0xa4: /* movsb */
2952 case 0xa5: /* movsd/w */
2953 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2955 case 0xaa: /* stosb */
2956 case 0xab: /* stosd/w */
2957 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2962 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2963 struct tss_segment_16 *tss)
2965 tss->ip = ctxt->_eip;
2966 tss->flag = ctxt->eflags;
2967 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2968 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2969 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2970 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2971 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2972 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2973 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2974 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2976 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2977 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2978 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2979 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2980 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2983 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2984 struct tss_segment_16 *tss)
2989 ctxt->_eip = tss->ip;
2990 ctxt->eflags = tss->flag | 2;
2991 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2992 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2993 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2994 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2995 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2996 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2997 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2998 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3001 * SDM says that segment selectors are loaded before segment
3004 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3005 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3006 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3007 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3008 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3013 * Now load segment descriptors. If fault happens at this stage
3014 * it is handled in a context of new task
3016 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3017 X86_TRANSFER_TASK_SWITCH, NULL);
3018 if (ret != X86EMUL_CONTINUE)
3020 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3021 X86_TRANSFER_TASK_SWITCH, NULL);
3022 if (ret != X86EMUL_CONTINUE)
3024 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3025 X86_TRANSFER_TASK_SWITCH, NULL);
3026 if (ret != X86EMUL_CONTINUE)
3028 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3029 X86_TRANSFER_TASK_SWITCH, NULL);
3030 if (ret != X86EMUL_CONTINUE)
3032 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3033 X86_TRANSFER_TASK_SWITCH, NULL);
3034 if (ret != X86EMUL_CONTINUE)
3037 return X86EMUL_CONTINUE;
3040 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3041 u16 tss_selector, u16 old_tss_sel,
3042 ulong old_tss_base, struct desc_struct *new_desc)
3044 const struct x86_emulate_ops *ops = ctxt->ops;
3045 struct tss_segment_16 tss_seg;
3047 u32 new_tss_base = get_desc_base(new_desc);
3049 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3051 if (ret != X86EMUL_CONTINUE)
3054 save_state_to_tss16(ctxt, &tss_seg);
3056 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3058 if (ret != X86EMUL_CONTINUE)
3061 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3063 if (ret != X86EMUL_CONTINUE)
3066 if (old_tss_sel != 0xffff) {
3067 tss_seg.prev_task_link = old_tss_sel;
3069 ret = ops->write_std(ctxt, new_tss_base,
3070 &tss_seg.prev_task_link,
3071 sizeof tss_seg.prev_task_link,
3073 if (ret != X86EMUL_CONTINUE)
3077 return load_state_from_tss16(ctxt, &tss_seg);
3080 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3081 struct tss_segment_32 *tss)
3083 /* CR3 and ldt selector are not saved intentionally */
3084 tss->eip = ctxt->_eip;
3085 tss->eflags = ctxt->eflags;
3086 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3087 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3088 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3089 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3090 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3091 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3092 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3093 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3095 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3096 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3097 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3098 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3099 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3100 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3103 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3104 struct tss_segment_32 *tss)
3109 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3110 return emulate_gp(ctxt, 0);
3111 ctxt->_eip = tss->eip;
3112 ctxt->eflags = tss->eflags | 2;
3114 /* General purpose registers */
3115 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3116 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3117 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3118 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3119 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3120 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3121 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3122 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3125 * SDM says that segment selectors are loaded before segment
3126 * descriptors. This is important because CPL checks will
3129 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3130 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3131 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3132 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3133 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3134 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3135 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3138 * If we're switching between Protected Mode and VM86, we need to make
3139 * sure to update the mode before loading the segment descriptors so
3140 * that the selectors are interpreted correctly.
3142 if (ctxt->eflags & X86_EFLAGS_VM) {
3143 ctxt->mode = X86EMUL_MODE_VM86;
3146 ctxt->mode = X86EMUL_MODE_PROT32;
3151 * Now load segment descriptors. If fault happenes at this stage
3152 * it is handled in a context of new task
3154 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3155 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3156 if (ret != X86EMUL_CONTINUE)
3158 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3159 X86_TRANSFER_TASK_SWITCH, NULL);
3160 if (ret != X86EMUL_CONTINUE)
3162 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3163 X86_TRANSFER_TASK_SWITCH, NULL);
3164 if (ret != X86EMUL_CONTINUE)
3166 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3167 X86_TRANSFER_TASK_SWITCH, NULL);
3168 if (ret != X86EMUL_CONTINUE)
3170 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3171 X86_TRANSFER_TASK_SWITCH, NULL);
3172 if (ret != X86EMUL_CONTINUE)
3174 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3175 X86_TRANSFER_TASK_SWITCH, NULL);
3176 if (ret != X86EMUL_CONTINUE)
3178 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3179 X86_TRANSFER_TASK_SWITCH, NULL);
3184 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3185 u16 tss_selector, u16 old_tss_sel,
3186 ulong old_tss_base, struct desc_struct *new_desc)
3188 const struct x86_emulate_ops *ops = ctxt->ops;
3189 struct tss_segment_32 tss_seg;
3191 u32 new_tss_base = get_desc_base(new_desc);
3192 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3193 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3195 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3197 if (ret != X86EMUL_CONTINUE)
3200 save_state_to_tss32(ctxt, &tss_seg);
3202 /* Only GP registers and segment selectors are saved */
3203 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3204 ldt_sel_offset - eip_offset, &ctxt->exception);
3205 if (ret != X86EMUL_CONTINUE)
3208 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3210 if (ret != X86EMUL_CONTINUE)
3213 if (old_tss_sel != 0xffff) {
3214 tss_seg.prev_task_link = old_tss_sel;
3216 ret = ops->write_std(ctxt, new_tss_base,
3217 &tss_seg.prev_task_link,
3218 sizeof tss_seg.prev_task_link,
3220 if (ret != X86EMUL_CONTINUE)
3224 return load_state_from_tss32(ctxt, &tss_seg);
3227 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3228 u16 tss_selector, int idt_index, int reason,
3229 bool has_error_code, u32 error_code)
3231 const struct x86_emulate_ops *ops = ctxt->ops;
3232 struct desc_struct curr_tss_desc, next_tss_desc;
3234 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3235 ulong old_tss_base =
3236 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3238 ulong desc_addr, dr7;
3240 /* FIXME: old_tss_base == ~0 ? */
3242 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3243 if (ret != X86EMUL_CONTINUE)
3245 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3246 if (ret != X86EMUL_CONTINUE)
3249 /* FIXME: check that next_tss_desc is tss */
3252 * Check privileges. The three cases are task switch caused by...
3254 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3255 * 2. Exception/IRQ/iret: No check is performed
3256 * 3. jmp/call to TSS/task-gate: No check is performed since the
3257 * hardware checks it before exiting.
3259 if (reason == TASK_SWITCH_GATE) {
3260 if (idt_index != -1) {
3261 /* Software interrupts */
3262 struct desc_struct task_gate_desc;
3265 ret = read_interrupt_descriptor(ctxt, idt_index,
3267 if (ret != X86EMUL_CONTINUE)
3270 dpl = task_gate_desc.dpl;
3271 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3272 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3276 desc_limit = desc_limit_scaled(&next_tss_desc);
3277 if (!next_tss_desc.p ||
3278 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3279 desc_limit < 0x2b)) {
3280 return emulate_ts(ctxt, tss_selector & 0xfffc);
3283 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3284 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3285 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3288 if (reason == TASK_SWITCH_IRET)
3289 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3291 /* set back link to prev task only if NT bit is set in eflags
3292 note that old_tss_sel is not used after this point */
3293 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3294 old_tss_sel = 0xffff;
3296 if (next_tss_desc.type & 8)
3297 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3298 old_tss_base, &next_tss_desc);
3300 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3301 old_tss_base, &next_tss_desc);
3302 if (ret != X86EMUL_CONTINUE)
3305 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3306 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3308 if (reason != TASK_SWITCH_IRET) {
3309 next_tss_desc.type |= (1 << 1); /* set busy flag */
3310 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3313 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3314 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3316 if (has_error_code) {
3317 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3318 ctxt->lock_prefix = 0;
3319 ctxt->src.val = (unsigned long) error_code;
3320 ret = em_push(ctxt);
3323 ops->get_dr(ctxt, 7, &dr7);
3324 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3329 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3330 u16 tss_selector, int idt_index, int reason,
3331 bool has_error_code, u32 error_code)
3335 invalidate_registers(ctxt);
3336 ctxt->_eip = ctxt->eip;
3337 ctxt->dst.type = OP_NONE;
3339 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3340 has_error_code, error_code);
3342 if (rc == X86EMUL_CONTINUE) {
3343 ctxt->eip = ctxt->_eip;
3344 writeback_registers(ctxt);
3347 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3350 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3353 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3355 register_address_increment(ctxt, reg, df * op->bytes);
3356 op->addr.mem.ea = register_address(ctxt, reg);
3359 static int em_das(struct x86_emulate_ctxt *ctxt)
3362 bool af, cf, old_cf;
3364 cf = ctxt->eflags & X86_EFLAGS_CF;
3370 af = ctxt->eflags & X86_EFLAGS_AF;
3371 if ((al & 0x0f) > 9 || af) {
3373 cf = old_cf | (al >= 250);
3378 if (old_al > 0x99 || old_cf) {
3384 /* Set PF, ZF, SF */
3385 ctxt->src.type = OP_IMM;
3387 ctxt->src.bytes = 1;
3388 fastop(ctxt, em_or);
3389 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3391 ctxt->eflags |= X86_EFLAGS_CF;
3393 ctxt->eflags |= X86_EFLAGS_AF;
3394 return X86EMUL_CONTINUE;
3397 static int em_aam(struct x86_emulate_ctxt *ctxt)
3401 if (ctxt->src.val == 0)
3402 return emulate_de(ctxt);
3404 al = ctxt->dst.val & 0xff;
3405 ah = al / ctxt->src.val;
3406 al %= ctxt->src.val;
3408 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3410 /* Set PF, ZF, SF */
3411 ctxt->src.type = OP_IMM;
3413 ctxt->src.bytes = 1;
3414 fastop(ctxt, em_or);
3416 return X86EMUL_CONTINUE;
3419 static int em_aad(struct x86_emulate_ctxt *ctxt)
3421 u8 al = ctxt->dst.val & 0xff;
3422 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3424 al = (al + (ah * ctxt->src.val)) & 0xff;
3426 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3428 /* Set PF, ZF, SF */
3429 ctxt->src.type = OP_IMM;
3431 ctxt->src.bytes = 1;
3432 fastop(ctxt, em_or);
3434 return X86EMUL_CONTINUE;
3437 static int em_call(struct x86_emulate_ctxt *ctxt)
3440 long rel = ctxt->src.val;
3442 ctxt->src.val = (unsigned long)ctxt->_eip;
3443 rc = jmp_rel(ctxt, rel);
3444 if (rc != X86EMUL_CONTINUE)
3446 return em_push(ctxt);
3449 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3454 struct desc_struct old_desc, new_desc;
3455 const struct x86_emulate_ops *ops = ctxt->ops;
3456 int cpl = ctxt->ops->cpl(ctxt);
3457 enum x86emul_mode prev_mode = ctxt->mode;
3459 old_eip = ctxt->_eip;
3460 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3462 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3463 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3464 X86_TRANSFER_CALL_JMP, &new_desc);
3465 if (rc != X86EMUL_CONTINUE)
3468 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3469 if (rc != X86EMUL_CONTINUE)
3472 ctxt->src.val = old_cs;
3474 if (rc != X86EMUL_CONTINUE)
3477 ctxt->src.val = old_eip;
3479 /* If we failed, we tainted the memory, but the very least we should
3481 if (rc != X86EMUL_CONTINUE) {
3482 pr_warn_once("faulting far call emulation tainted memory\n");
3487 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3488 ctxt->mode = prev_mode;
3493 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3498 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3499 if (rc != X86EMUL_CONTINUE)
3501 rc = assign_eip_near(ctxt, eip);
3502 if (rc != X86EMUL_CONTINUE)
3504 rsp_increment(ctxt, ctxt->src.val);
3505 return X86EMUL_CONTINUE;
3508 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3510 /* Write back the register source. */
3511 ctxt->src.val = ctxt->dst.val;
3512 write_register_operand(&ctxt->src);
3514 /* Write back the memory destination with implicit LOCK prefix. */
3515 ctxt->dst.val = ctxt->src.orig_val;
3516 ctxt->lock_prefix = 1;
3517 return X86EMUL_CONTINUE;
3520 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3522 ctxt->dst.val = ctxt->src2.val;
3523 return fastop(ctxt, em_imul);
3526 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3528 ctxt->dst.type = OP_REG;
3529 ctxt->dst.bytes = ctxt->src.bytes;
3530 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3531 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3533 return X86EMUL_CONTINUE;
3536 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3540 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3541 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3542 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3543 return X86EMUL_CONTINUE;
3546 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3550 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3551 return emulate_gp(ctxt, 0);
3552 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3553 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3554 return X86EMUL_CONTINUE;
3557 static int em_mov(struct x86_emulate_ctxt *ctxt)
3559 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3560 return X86EMUL_CONTINUE;
3563 #define FFL(x) bit(X86_FEATURE_##x)
3565 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3567 u32 ebx, ecx, edx, eax = 1;
3571 * Check MOVBE is set in the guest-visible CPUID leaf.
3573 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3574 if (!(ecx & FFL(MOVBE)))
3575 return emulate_ud(ctxt);
3577 switch (ctxt->op_bytes) {
3580 * From MOVBE definition: "...When the operand size is 16 bits,
3581 * the upper word of the destination register remains unchanged
3584 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3585 * rules so we have to do the operation almost per hand.
3587 tmp = (u16)ctxt->src.val;
3588 ctxt->dst.val &= ~0xffffUL;
3589 ctxt->dst.val |= (unsigned long)swab16(tmp);
3592 ctxt->dst.val = swab32((u32)ctxt->src.val);
3595 ctxt->dst.val = swab64(ctxt->src.val);
3600 return X86EMUL_CONTINUE;
3603 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3605 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3606 return emulate_gp(ctxt, 0);
3608 /* Disable writeback. */
3609 ctxt->dst.type = OP_NONE;
3610 return X86EMUL_CONTINUE;
3613 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3617 if (ctxt->mode == X86EMUL_MODE_PROT64)
3618 val = ctxt->src.val & ~0ULL;
3620 val = ctxt->src.val & ~0U;
3622 /* #UD condition is already handled. */
3623 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3624 return emulate_gp(ctxt, 0);
3626 /* Disable writeback. */
3627 ctxt->dst.type = OP_NONE;
3628 return X86EMUL_CONTINUE;
3631 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3635 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3636 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3637 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3638 return emulate_gp(ctxt, 0);
3640 return X86EMUL_CONTINUE;
3643 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3647 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3648 return emulate_gp(ctxt, 0);
3650 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3651 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3652 return X86EMUL_CONTINUE;
3655 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3657 if (ctxt->modrm_reg > VCPU_SREG_GS)
3658 return emulate_ud(ctxt);
3660 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3661 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3662 ctxt->dst.bytes = 2;
3663 return X86EMUL_CONTINUE;
3666 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3668 u16 sel = ctxt->src.val;
3670 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3671 return emulate_ud(ctxt);
3673 if (ctxt->modrm_reg == VCPU_SREG_SS)
3674 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3676 /* Disable writeback. */
3677 ctxt->dst.type = OP_NONE;
3678 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3681 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3683 u16 sel = ctxt->src.val;
3685 /* Disable writeback. */
3686 ctxt->dst.type = OP_NONE;
3687 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3690 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3692 u16 sel = ctxt->src.val;
3694 /* Disable writeback. */
3695 ctxt->dst.type = OP_NONE;
3696 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3699 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3704 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3705 if (rc == X86EMUL_CONTINUE)
3706 ctxt->ops->invlpg(ctxt, linear);
3707 /* Disable writeback. */
3708 ctxt->dst.type = OP_NONE;
3709 return X86EMUL_CONTINUE;
3712 static int em_clts(struct x86_emulate_ctxt *ctxt)
3716 cr0 = ctxt->ops->get_cr(ctxt, 0);
3718 ctxt->ops->set_cr(ctxt, 0, cr0);
3719 return X86EMUL_CONTINUE;
3722 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3724 int rc = ctxt->ops->fix_hypercall(ctxt);
3726 if (rc != X86EMUL_CONTINUE)
3729 /* Let the processor re-execute the fixed hypercall */
3730 ctxt->_eip = ctxt->eip;
3731 /* Disable writeback. */
3732 ctxt->dst.type = OP_NONE;
3733 return X86EMUL_CONTINUE;
3736 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3737 void (*get)(struct x86_emulate_ctxt *ctxt,
3738 struct desc_ptr *ptr))
3740 struct desc_ptr desc_ptr;
3742 if (ctxt->mode == X86EMUL_MODE_PROT64)
3744 get(ctxt, &desc_ptr);
3745 if (ctxt->op_bytes == 2) {
3747 desc_ptr.address &= 0x00ffffff;
3749 /* Disable writeback. */
3750 ctxt->dst.type = OP_NONE;
3751 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3752 &desc_ptr, 2 + ctxt->op_bytes);
3755 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3757 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3760 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3762 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3765 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3767 struct desc_ptr desc_ptr;
3770 if (ctxt->mode == X86EMUL_MODE_PROT64)
3772 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3773 &desc_ptr.size, &desc_ptr.address,
3775 if (rc != X86EMUL_CONTINUE)
3777 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3778 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3779 return emulate_gp(ctxt, 0);
3781 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3783 ctxt->ops->set_idt(ctxt, &desc_ptr);
3784 /* Disable writeback. */
3785 ctxt->dst.type = OP_NONE;
3786 return X86EMUL_CONTINUE;
3789 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3791 return em_lgdt_lidt(ctxt, true);
3794 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3796 return em_lgdt_lidt(ctxt, false);
3799 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3801 if (ctxt->dst.type == OP_MEM)
3802 ctxt->dst.bytes = 2;
3803 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3804 return X86EMUL_CONTINUE;
3807 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3809 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3810 | (ctxt->src.val & 0x0f));
3811 ctxt->dst.type = OP_NONE;
3812 return X86EMUL_CONTINUE;
3815 static int em_loop(struct x86_emulate_ctxt *ctxt)
3817 int rc = X86EMUL_CONTINUE;
3819 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3820 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3821 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3822 rc = jmp_rel(ctxt, ctxt->src.val);
3827 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3829 int rc = X86EMUL_CONTINUE;
3831 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3832 rc = jmp_rel(ctxt, ctxt->src.val);
3837 static int em_in(struct x86_emulate_ctxt *ctxt)
3839 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3841 return X86EMUL_IO_NEEDED;
3843 return X86EMUL_CONTINUE;
3846 static int em_out(struct x86_emulate_ctxt *ctxt)
3848 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3850 /* Disable writeback. */
3851 ctxt->dst.type = OP_NONE;
3852 return X86EMUL_CONTINUE;
3855 static int em_cli(struct x86_emulate_ctxt *ctxt)
3857 if (emulator_bad_iopl(ctxt))
3858 return emulate_gp(ctxt, 0);
3860 ctxt->eflags &= ~X86_EFLAGS_IF;
3861 return X86EMUL_CONTINUE;
3864 static int em_sti(struct x86_emulate_ctxt *ctxt)
3866 if (emulator_bad_iopl(ctxt))
3867 return emulate_gp(ctxt, 0);
3869 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3870 ctxt->eflags |= X86_EFLAGS_IF;
3871 return X86EMUL_CONTINUE;
3874 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3876 u32 eax, ebx, ecx, edx;
3879 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3880 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3881 ctxt->ops->cpl(ctxt)) {
3882 return emulate_gp(ctxt, 0);
3885 eax = reg_read(ctxt, VCPU_REGS_RAX);
3886 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3887 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3888 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3889 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3890 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3891 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3892 return X86EMUL_CONTINUE;
3895 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3899 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3901 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3903 ctxt->eflags &= ~0xffUL;
3904 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3905 return X86EMUL_CONTINUE;
3908 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3910 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3911 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3912 return X86EMUL_CONTINUE;
3915 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3917 switch (ctxt->op_bytes) {
3918 #ifdef CONFIG_X86_64
3920 asm("bswap %0" : "+r"(ctxt->dst.val));
3924 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3927 return X86EMUL_CONTINUE;
3930 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3932 /* emulating clflush regardless of cpuid */
3933 return X86EMUL_CONTINUE;
3936 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3938 ctxt->dst.val = (s32) ctxt->src.val;
3939 return X86EMUL_CONTINUE;
3942 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3944 u32 eax = 1, ebx, ecx = 0, edx;
3946 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3947 if (!(edx & FFL(FXSR)))
3948 return emulate_ud(ctxt);
3950 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3951 return emulate_nm(ctxt);
3954 * Don't emulate a case that should never be hit, instead of working
3955 * around a lack of fxsave64/fxrstor64 on old compilers.
3957 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3958 return X86EMUL_UNHANDLEABLE;
3960 return X86EMUL_CONTINUE;
3964 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3965 * and restore MXCSR.
3967 static size_t __fxstate_size(int nregs)
3969 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3972 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3975 if (ctxt->mode == X86EMUL_MODE_PROT64)
3976 return __fxstate_size(16);
3978 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3979 return __fxstate_size(cr4_osfxsr ? 8 : 0);
3983 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3986 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
3987 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
3989 * 3) 64-bit mode with REX.W prefix
3990 * - like (2), but XMM 8-15 are being saved and restored
3991 * 4) 64-bit mode without REX.W prefix
3992 * - like (3), but FIP and FDP are 64 bit
3994 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3995 * desired result. (4) is not emulated.
3997 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3998 * and FPU DS) should match.
4000 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4002 struct fxregs_state fx_state;
4005 rc = check_fxsr(ctxt);
4006 if (rc != X86EMUL_CONTINUE)
4009 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4011 if (rc != X86EMUL_CONTINUE)
4014 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4015 fxstate_size(ctxt));
4019 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4020 * in the host registers (via FXSAVE) instead, so they won't be modified.
4021 * (preemption has to stay disabled until FXRSTOR).
4023 * Use noinline to keep the stack for other functions called by callers small.
4025 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4026 const size_t used_size)
4028 struct fxregs_state fx_tmp;
4031 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4032 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4033 __fxstate_size(16) - used_size);
4038 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4040 struct fxregs_state fx_state;
4044 rc = check_fxsr(ctxt);
4045 if (rc != X86EMUL_CONTINUE)
4048 size = fxstate_size(ctxt);
4049 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4050 if (rc != X86EMUL_CONTINUE)
4053 if (size < __fxstate_size(16)) {
4054 rc = fxregs_fixup(&fx_state, size);
4055 if (rc != X86EMUL_CONTINUE)
4059 if (fx_state.mxcsr >> 16) {
4060 rc = emulate_gp(ctxt, 0);
4064 if (rc == X86EMUL_CONTINUE)
4065 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4071 static bool valid_cr(int nr)
4083 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4085 if (!valid_cr(ctxt->modrm_reg))
4086 return emulate_ud(ctxt);
4088 return X86EMUL_CONTINUE;
4091 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4093 u64 new_val = ctxt->src.val64;
4094 int cr = ctxt->modrm_reg;
4097 static u64 cr_reserved_bits[] = {
4098 0xffffffff00000000ULL,
4099 0, 0, 0, /* CR3 checked later */
4106 return emulate_ud(ctxt);
4108 if (new_val & cr_reserved_bits[cr])
4109 return emulate_gp(ctxt, 0);
4114 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4115 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4116 return emulate_gp(ctxt, 0);
4118 cr4 = ctxt->ops->get_cr(ctxt, 4);
4119 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4121 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4122 !(cr4 & X86_CR4_PAE))
4123 return emulate_gp(ctxt, 0);
4130 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4131 if (efer & EFER_LMA) {
4133 u32 eax, ebx, ecx, edx;
4137 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4139 maxphyaddr = eax & 0xff;
4142 rsvd = rsvd_bits(maxphyaddr, 62);
4146 return emulate_gp(ctxt, 0);
4151 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4153 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4154 return emulate_gp(ctxt, 0);
4160 return X86EMUL_CONTINUE;
4163 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4167 ctxt->ops->get_dr(ctxt, 7, &dr7);
4169 /* Check if DR7.Global_Enable is set */
4170 return dr7 & (1 << 13);
4173 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4175 int dr = ctxt->modrm_reg;
4179 return emulate_ud(ctxt);
4181 cr4 = ctxt->ops->get_cr(ctxt, 4);
4182 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4183 return emulate_ud(ctxt);
4185 if (check_dr7_gd(ctxt)) {
4188 ctxt->ops->get_dr(ctxt, 6, &dr6);
4190 dr6 |= DR6_BD | DR6_RTM;
4191 ctxt->ops->set_dr(ctxt, 6, dr6);
4192 return emulate_db(ctxt);
4195 return X86EMUL_CONTINUE;
4198 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4200 u64 new_val = ctxt->src.val64;
4201 int dr = ctxt->modrm_reg;
4203 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4204 return emulate_gp(ctxt, 0);
4206 return check_dr_read(ctxt);
4209 static int check_svme(struct x86_emulate_ctxt *ctxt)
4213 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4215 if (!(efer & EFER_SVME))
4216 return emulate_ud(ctxt);
4218 return X86EMUL_CONTINUE;
4221 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4223 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4225 /* Valid physical address? */
4226 if (rax & 0xffff000000000000ULL)
4227 return emulate_gp(ctxt, 0);
4229 return check_svme(ctxt);
4232 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4234 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4236 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4237 return emulate_ud(ctxt);
4239 return X86EMUL_CONTINUE;
4242 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4244 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4245 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4247 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4248 ctxt->ops->check_pmc(ctxt, rcx))
4249 return emulate_gp(ctxt, 0);
4251 return X86EMUL_CONTINUE;
4254 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4256 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4257 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4258 return emulate_gp(ctxt, 0);
4260 return X86EMUL_CONTINUE;
4263 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4265 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4266 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4267 return emulate_gp(ctxt, 0);
4269 return X86EMUL_CONTINUE;
4272 #define D(_y) { .flags = (_y) }
4273 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4274 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4275 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4276 #define N D(NotImpl)
4277 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4278 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4279 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4280 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4281 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4282 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4283 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4284 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4285 #define II(_f, _e, _i) \
4286 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4287 #define IIP(_f, _e, _i, _p) \
4288 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4289 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4290 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4292 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4293 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4294 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4295 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4296 #define I2bvIP(_f, _e, _i, _p) \
4297 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4299 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4300 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4301 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4303 static const struct opcode group7_rm0[] = {
4305 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4309 static const struct opcode group7_rm1[] = {
4310 DI(SrcNone | Priv, monitor),
4311 DI(SrcNone | Priv, mwait),
4315 static const struct opcode group7_rm3[] = {
4316 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4317 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4318 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4319 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4320 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4321 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4322 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4323 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4326 static const struct opcode group7_rm7[] = {
4328 DIP(SrcNone, rdtscp, check_rdtsc),
4332 static const struct opcode group1[] = {
4334 F(Lock | PageTable, em_or),
4337 F(Lock | PageTable, em_and),
4343 static const struct opcode group1A[] = {
4344 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4347 static const struct opcode group2[] = {
4348 F(DstMem | ModRM, em_rol),
4349 F(DstMem | ModRM, em_ror),
4350 F(DstMem | ModRM, em_rcl),
4351 F(DstMem | ModRM, em_rcr),
4352 F(DstMem | ModRM, em_shl),
4353 F(DstMem | ModRM, em_shr),
4354 F(DstMem | ModRM, em_shl),
4355 F(DstMem | ModRM, em_sar),
4358 static const struct opcode group3[] = {
4359 F(DstMem | SrcImm | NoWrite, em_test),
4360 F(DstMem | SrcImm | NoWrite, em_test),
4361 F(DstMem | SrcNone | Lock, em_not),
4362 F(DstMem | SrcNone | Lock, em_neg),
4363 F(DstXacc | Src2Mem, em_mul_ex),
4364 F(DstXacc | Src2Mem, em_imul_ex),
4365 F(DstXacc | Src2Mem, em_div_ex),
4366 F(DstXacc | Src2Mem, em_idiv_ex),
4369 static const struct opcode group4[] = {
4370 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4371 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4375 static const struct opcode group5[] = {
4376 F(DstMem | SrcNone | Lock, em_inc),
4377 F(DstMem | SrcNone | Lock, em_dec),
4378 I(SrcMem | NearBranch, em_call_near_abs),
4379 I(SrcMemFAddr | ImplicitOps, em_call_far),
4380 I(SrcMem | NearBranch, em_jmp_abs),
4381 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4382 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4385 static const struct opcode group6[] = {
4386 DI(Prot | DstMem, sldt),
4387 DI(Prot | DstMem, str),
4388 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4389 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4393 static const struct group_dual group7 = { {
4394 II(Mov | DstMem, em_sgdt, sgdt),
4395 II(Mov | DstMem, em_sidt, sidt),
4396 II(SrcMem | Priv, em_lgdt, lgdt),
4397 II(SrcMem | Priv, em_lidt, lidt),
4398 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4399 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4400 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4404 N, EXT(0, group7_rm3),
4405 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4406 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4410 static const struct opcode group8[] = {
4412 F(DstMem | SrcImmByte | NoWrite, em_bt),
4413 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4414 F(DstMem | SrcImmByte | Lock, em_btr),
4415 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4418 static const struct group_dual group9 = { {
4419 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4421 N, N, N, N, N, N, N, N,
4424 static const struct opcode group11[] = {
4425 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4429 static const struct gprefix pfx_0f_ae_7 = {
4430 I(SrcMem | ByteOp, em_clflush), N, N, N,
4433 static const struct group_dual group15 = { {
4434 I(ModRM | Aligned16, em_fxsave),
4435 I(ModRM | Aligned16, em_fxrstor),
4436 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4438 N, N, N, N, N, N, N, N,
4441 static const struct gprefix pfx_0f_6f_0f_7f = {
4442 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4445 static const struct instr_dual instr_dual_0f_2b = {
4449 static const struct gprefix pfx_0f_2b = {
4450 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4453 static const struct gprefix pfx_0f_28_0f_29 = {
4454 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4457 static const struct gprefix pfx_0f_e7 = {
4458 N, I(Sse, em_mov), N, N,
4461 static const struct escape escape_d9 = { {
4462 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4465 N, N, N, N, N, N, N, N,
4467 N, N, N, N, N, N, N, N,
4469 N, N, N, N, N, N, N, N,
4471 N, N, N, N, N, N, N, N,
4473 N, N, N, N, N, N, N, N,
4475 N, N, N, N, N, N, N, N,
4477 N, N, N, N, N, N, N, N,
4479 N, N, N, N, N, N, N, N,
4482 static const struct escape escape_db = { {
4483 N, N, N, N, N, N, N, N,
4486 N, N, N, N, N, N, N, N,
4488 N, N, N, N, N, N, N, N,
4490 N, N, N, N, N, N, N, N,
4492 N, N, N, N, N, N, N, N,
4494 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4496 N, N, N, N, N, N, N, N,
4498 N, N, N, N, N, N, N, N,
4500 N, N, N, N, N, N, N, N,
4503 static const struct escape escape_dd = { {
4504 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4507 N, N, N, N, N, N, N, N,
4509 N, N, N, N, N, N, N, N,
4511 N, N, N, N, N, N, N, N,
4513 N, N, N, N, N, N, N, N,
4515 N, N, N, N, N, N, N, N,
4517 N, N, N, N, N, N, N, N,
4519 N, N, N, N, N, N, N, N,
4521 N, N, N, N, N, N, N, N,
4524 static const struct instr_dual instr_dual_0f_c3 = {
4525 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4528 static const struct mode_dual mode_dual_63 = {
4529 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4532 static const struct opcode opcode_table[256] = {
4534 F6ALU(Lock, em_add),
4535 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4536 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4538 F6ALU(Lock | PageTable, em_or),
4539 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4542 F6ALU(Lock, em_adc),
4543 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4544 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4546 F6ALU(Lock, em_sbb),
4547 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4548 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4550 F6ALU(Lock | PageTable, em_and), N, N,
4552 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4554 F6ALU(Lock, em_xor), N, N,
4556 F6ALU(NoWrite, em_cmp), N, N,
4558 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4560 X8(I(SrcReg | Stack, em_push)),
4562 X8(I(DstReg | Stack, em_pop)),
4564 I(ImplicitOps | Stack | No64, em_pusha),
4565 I(ImplicitOps | Stack | No64, em_popa),
4566 N, MD(ModRM, &mode_dual_63),
4569 I(SrcImm | Mov | Stack, em_push),
4570 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4571 I(SrcImmByte | Mov | Stack, em_push),
4572 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4573 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4574 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4576 X16(D(SrcImmByte | NearBranch)),
4578 G(ByteOp | DstMem | SrcImm, group1),
4579 G(DstMem | SrcImm, group1),
4580 G(ByteOp | DstMem | SrcImm | No64, group1),
4581 G(DstMem | SrcImmByte, group1),
4582 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4583 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4585 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4586 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4587 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4588 D(ModRM | SrcMem | NoAccess | DstReg),
4589 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4592 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4594 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4595 I(SrcImmFAddr | No64, em_call_far), N,
4596 II(ImplicitOps | Stack, em_pushf, pushf),
4597 II(ImplicitOps | Stack, em_popf, popf),
4598 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4600 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4601 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4602 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4603 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4605 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4606 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4607 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4608 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4610 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4612 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4614 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4615 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4616 I(ImplicitOps | NearBranch, em_ret),
4617 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4618 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4619 G(ByteOp, group11), G(0, group11),
4621 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4622 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4623 I(ImplicitOps, em_ret_far),
4624 D(ImplicitOps), DI(SrcImmByte, intn),
4625 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4627 G(Src2One | ByteOp, group2), G(Src2One, group2),
4628 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4629 I(DstAcc | SrcImmUByte | No64, em_aam),
4630 I(DstAcc | SrcImmUByte | No64, em_aad),
4631 F(DstAcc | ByteOp | No64, em_salc),
4632 I(DstAcc | SrcXLat | ByteOp, em_mov),
4634 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4636 X3(I(SrcImmByte | NearBranch, em_loop)),
4637 I(SrcImmByte | NearBranch, em_jcxz),
4638 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4639 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4641 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4642 I(SrcImmFAddr | No64, em_jmp_far),
4643 D(SrcImmByte | ImplicitOps | NearBranch),
4644 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4645 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4647 N, DI(ImplicitOps, icebp), N, N,
4648 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4649 G(ByteOp, group3), G(0, group3),
4651 D(ImplicitOps), D(ImplicitOps),
4652 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4653 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4656 static const struct opcode twobyte_table[256] = {
4658 G(0, group6), GD(0, &group7), N, N,
4659 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4660 II(ImplicitOps | Priv, em_clts, clts), N,
4661 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4662 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4664 N, N, N, N, N, N, N, N,
4665 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4666 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4668 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4669 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4670 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4672 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4675 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4676 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4677 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4680 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4681 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4682 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4683 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4684 I(ImplicitOps | EmulateOnUD, em_sysenter),
4685 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4687 N, N, N, N, N, N, N, N,
4689 X16(D(DstReg | SrcMem | ModRM)),
4691 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4696 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4701 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4703 X16(D(SrcImm | NearBranch)),
4705 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4707 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4708 II(ImplicitOps, em_cpuid, cpuid),
4709 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4710 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4711 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4713 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4714 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4715 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4716 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4717 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4718 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4720 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4721 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4722 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4723 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4724 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4725 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4729 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4730 I(DstReg | SrcMem | ModRM, em_bsf_c),
4731 I(DstReg | SrcMem | ModRM, em_bsr_c),
4732 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4734 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4735 N, ID(0, &instr_dual_0f_c3),
4736 N, N, N, GD(0, &group9),
4738 X8(I(DstReg, em_bswap)),
4740 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4742 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4743 N, N, N, N, N, N, N, N,
4745 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4748 static const struct instr_dual instr_dual_0f_38_f0 = {
4749 I(DstReg | SrcMem | Mov, em_movbe), N
4752 static const struct instr_dual instr_dual_0f_38_f1 = {
4753 I(DstMem | SrcReg | Mov, em_movbe), N
4756 static const struct gprefix three_byte_0f_38_f0 = {
4757 ID(0, &instr_dual_0f_38_f0), N, N, N
4760 static const struct gprefix three_byte_0f_38_f1 = {
4761 ID(0, &instr_dual_0f_38_f1), N, N, N
4765 * Insns below are selected by the prefix which indexed by the third opcode
4768 static const struct opcode opcode_map_0f_38[256] = {
4770 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4772 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4774 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4775 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4796 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4800 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4806 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4807 unsigned size, bool sign_extension)
4809 int rc = X86EMUL_CONTINUE;
4813 op->addr.mem.ea = ctxt->_eip;
4814 /* NB. Immediates are sign-extended as necessary. */
4815 switch (op->bytes) {
4817 op->val = insn_fetch(s8, ctxt);
4820 op->val = insn_fetch(s16, ctxt);
4823 op->val = insn_fetch(s32, ctxt);
4826 op->val = insn_fetch(s64, ctxt);
4829 if (!sign_extension) {
4830 switch (op->bytes) {
4838 op->val &= 0xffffffff;
4846 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4849 int rc = X86EMUL_CONTINUE;
4853 decode_register_operand(ctxt, op);
4856 rc = decode_imm(ctxt, op, 1, false);
4859 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4863 if (ctxt->d & BitOp)
4864 fetch_bit_operand(ctxt);
4865 op->orig_val = op->val;
4868 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4872 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4873 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4874 fetch_register_operand(op);
4875 op->orig_val = op->val;
4879 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4880 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4881 fetch_register_operand(op);
4882 op->orig_val = op->val;
4885 if (ctxt->d & ByteOp) {
4890 op->bytes = ctxt->op_bytes;
4891 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4892 fetch_register_operand(op);
4893 op->orig_val = op->val;
4897 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4899 register_address(ctxt, VCPU_REGS_RDI);
4900 op->addr.mem.seg = VCPU_SREG_ES;
4907 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4908 fetch_register_operand(op);
4913 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4916 rc = decode_imm(ctxt, op, 1, true);
4924 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4927 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4930 ctxt->memop.bytes = 1;
4931 if (ctxt->memop.type == OP_REG) {
4932 ctxt->memop.addr.reg = decode_register(ctxt,
4933 ctxt->modrm_rm, true);
4934 fetch_register_operand(&ctxt->memop);
4938 ctxt->memop.bytes = 2;
4941 ctxt->memop.bytes = 4;
4944 rc = decode_imm(ctxt, op, 2, false);
4947 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4951 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4953 register_address(ctxt, VCPU_REGS_RSI);
4954 op->addr.mem.seg = ctxt->seg_override;
4960 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4963 reg_read(ctxt, VCPU_REGS_RBX) +
4964 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4965 op->addr.mem.seg = ctxt->seg_override;
4970 op->addr.mem.ea = ctxt->_eip;
4971 op->bytes = ctxt->op_bytes + 2;
4972 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4975 ctxt->memop.bytes = ctxt->op_bytes + 2;
4979 op->val = VCPU_SREG_ES;
4983 op->val = VCPU_SREG_CS;
4987 op->val = VCPU_SREG_SS;
4991 op->val = VCPU_SREG_DS;
4995 op->val = VCPU_SREG_FS;
4999 op->val = VCPU_SREG_GS;
5002 /* Special instructions do their own operand decoding. */
5004 op->type = OP_NONE; /* Disable writeback. */
5012 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5014 int rc = X86EMUL_CONTINUE;
5015 int mode = ctxt->mode;
5016 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5017 bool op_prefix = false;
5018 bool has_seg_override = false;
5019 struct opcode opcode;
5021 struct desc_struct desc;
5023 ctxt->memop.type = OP_NONE;
5024 ctxt->memopp = NULL;
5025 ctxt->_eip = ctxt->eip;
5026 ctxt->fetch.ptr = ctxt->fetch.data;
5027 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5028 ctxt->opcode_len = 1;
5030 memcpy(ctxt->fetch.data, insn, insn_len);
5032 rc = __do_insn_fetch_bytes(ctxt, 1);
5033 if (rc != X86EMUL_CONTINUE)
5038 case X86EMUL_MODE_REAL:
5039 case X86EMUL_MODE_VM86:
5040 def_op_bytes = def_ad_bytes = 2;
5041 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5043 def_op_bytes = def_ad_bytes = 4;
5045 case X86EMUL_MODE_PROT16:
5046 def_op_bytes = def_ad_bytes = 2;
5048 case X86EMUL_MODE_PROT32:
5049 def_op_bytes = def_ad_bytes = 4;
5051 #ifdef CONFIG_X86_64
5052 case X86EMUL_MODE_PROT64:
5058 return EMULATION_FAILED;
5061 ctxt->op_bytes = def_op_bytes;
5062 ctxt->ad_bytes = def_ad_bytes;
5064 /* Legacy prefixes. */
5066 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5067 case 0x66: /* operand-size override */
5069 /* switch between 2/4 bytes */
5070 ctxt->op_bytes = def_op_bytes ^ 6;
5072 case 0x67: /* address-size override */
5073 if (mode == X86EMUL_MODE_PROT64)
5074 /* switch between 4/8 bytes */
5075 ctxt->ad_bytes = def_ad_bytes ^ 12;
5077 /* switch between 2/4 bytes */
5078 ctxt->ad_bytes = def_ad_bytes ^ 6;
5080 case 0x26: /* ES override */
5081 case 0x2e: /* CS override */
5082 case 0x36: /* SS override */
5083 case 0x3e: /* DS override */
5084 has_seg_override = true;
5085 ctxt->seg_override = (ctxt->b >> 3) & 3;
5087 case 0x64: /* FS override */
5088 case 0x65: /* GS override */
5089 has_seg_override = true;
5090 ctxt->seg_override = ctxt->b & 7;
5092 case 0x40 ... 0x4f: /* REX */
5093 if (mode != X86EMUL_MODE_PROT64)
5095 ctxt->rex_prefix = ctxt->b;
5097 case 0xf0: /* LOCK */
5098 ctxt->lock_prefix = 1;
5100 case 0xf2: /* REPNE/REPNZ */
5101 case 0xf3: /* REP/REPE/REPZ */
5102 ctxt->rep_prefix = ctxt->b;
5108 /* Any legacy prefix after a REX prefix nullifies its effect. */
5110 ctxt->rex_prefix = 0;
5116 if (ctxt->rex_prefix & 8)
5117 ctxt->op_bytes = 8; /* REX.W */
5119 /* Opcode byte(s). */
5120 opcode = opcode_table[ctxt->b];
5121 /* Two-byte opcode? */
5122 if (ctxt->b == 0x0f) {
5123 ctxt->opcode_len = 2;
5124 ctxt->b = insn_fetch(u8, ctxt);
5125 opcode = twobyte_table[ctxt->b];
5127 /* 0F_38 opcode map */
5128 if (ctxt->b == 0x38) {
5129 ctxt->opcode_len = 3;
5130 ctxt->b = insn_fetch(u8, ctxt);
5131 opcode = opcode_map_0f_38[ctxt->b];
5134 ctxt->d = opcode.flags;
5136 if (ctxt->d & ModRM)
5137 ctxt->modrm = insn_fetch(u8, ctxt);
5139 /* vex-prefix instructions are not implemented */
5140 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5141 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5145 while (ctxt->d & GroupMask) {
5146 switch (ctxt->d & GroupMask) {
5148 goffset = (ctxt->modrm >> 3) & 7;
5149 opcode = opcode.u.group[goffset];
5152 goffset = (ctxt->modrm >> 3) & 7;
5153 if ((ctxt->modrm >> 6) == 3)
5154 opcode = opcode.u.gdual->mod3[goffset];
5156 opcode = opcode.u.gdual->mod012[goffset];
5159 goffset = ctxt->modrm & 7;
5160 opcode = opcode.u.group[goffset];
5163 if (ctxt->rep_prefix && op_prefix)
5164 return EMULATION_FAILED;
5165 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5166 switch (simd_prefix) {
5167 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5168 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5169 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5170 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5174 if (ctxt->modrm > 0xbf)
5175 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
5177 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5180 if ((ctxt->modrm >> 6) == 3)
5181 opcode = opcode.u.idual->mod3;
5183 opcode = opcode.u.idual->mod012;
5186 if (ctxt->mode == X86EMUL_MODE_PROT64)
5187 opcode = opcode.u.mdual->mode64;
5189 opcode = opcode.u.mdual->mode32;
5192 return EMULATION_FAILED;
5195 ctxt->d &= ~(u64)GroupMask;
5196 ctxt->d |= opcode.flags;
5201 return EMULATION_FAILED;
5203 ctxt->execute = opcode.u.execute;
5205 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5206 return EMULATION_FAILED;
5208 if (unlikely(ctxt->d &
5209 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5212 * These are copied unconditionally here, and checked unconditionally
5213 * in x86_emulate_insn.
5215 ctxt->check_perm = opcode.check_perm;
5216 ctxt->intercept = opcode.intercept;
5218 if (ctxt->d & NotImpl)
5219 return EMULATION_FAILED;
5221 if (mode == X86EMUL_MODE_PROT64) {
5222 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5224 else if (ctxt->d & NearBranch)
5228 if (ctxt->d & Op3264) {
5229 if (mode == X86EMUL_MODE_PROT64)
5235 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5239 ctxt->op_bytes = 16;
5240 else if (ctxt->d & Mmx)
5244 /* ModRM and SIB bytes. */
5245 if (ctxt->d & ModRM) {
5246 rc = decode_modrm(ctxt, &ctxt->memop);
5247 if (!has_seg_override) {
5248 has_seg_override = true;
5249 ctxt->seg_override = ctxt->modrm_seg;
5251 } else if (ctxt->d & MemAbs)
5252 rc = decode_abs(ctxt, &ctxt->memop);
5253 if (rc != X86EMUL_CONTINUE)
5256 if (!has_seg_override)
5257 ctxt->seg_override = VCPU_SREG_DS;
5259 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5262 * Decode and fetch the source operand: register, memory
5265 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5266 if (rc != X86EMUL_CONTINUE)
5270 * Decode and fetch the second source operand: register, memory
5273 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5274 if (rc != X86EMUL_CONTINUE)
5277 /* Decode and fetch the destination operand: register or memory. */
5278 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5280 if (ctxt->rip_relative && likely(ctxt->memopp))
5281 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5282 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5285 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5288 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5290 return ctxt->d & PageTable;
5293 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5295 /* The second termination condition only applies for REPE
5296 * and REPNE. Test if the repeat string operation prefix is
5297 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
5298 * corresponding termination condition according to:
5299 * - if REPE/REPZ and ZF = 0 then done
5300 * - if REPNE/REPNZ and ZF = 1 then done
5302 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5303 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5304 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5305 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5306 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5307 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5313 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5317 rc = asm_safe("fwait");
5319 if (unlikely(rc != X86EMUL_CONTINUE))
5320 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5322 return X86EMUL_CONTINUE;
5325 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5328 if (op->type == OP_MM)
5329 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5332 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5334 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5336 if (!(ctxt->d & ByteOp))
5337 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5339 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5340 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5341 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5342 : "c"(ctxt->src2.val));
5344 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5345 if (!fop) /* exception is returned in fop variable */
5346 return emulate_de(ctxt);
5347 return X86EMUL_CONTINUE;
5350 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5352 memset(&ctxt->rip_relative, 0,
5353 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5355 ctxt->io_read.pos = 0;
5356 ctxt->io_read.end = 0;
5357 ctxt->mem_read.end = 0;
5360 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5362 const struct x86_emulate_ops *ops = ctxt->ops;
5363 int rc = X86EMUL_CONTINUE;
5364 int saved_dst_type = ctxt->dst.type;
5365 unsigned emul_flags;
5367 ctxt->mem_read.pos = 0;
5369 /* LOCK prefix is allowed only with some instructions */
5370 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5371 rc = emulate_ud(ctxt);
5375 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5376 rc = emulate_ud(ctxt);
5380 emul_flags = ctxt->ops->get_hflags(ctxt);
5381 if (unlikely(ctxt->d &
5382 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5383 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5384 (ctxt->d & Undefined)) {
5385 rc = emulate_ud(ctxt);
5389 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5390 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5391 rc = emulate_ud(ctxt);
5395 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5396 rc = emulate_nm(ctxt);
5400 if (ctxt->d & Mmx) {
5401 rc = flush_pending_x87_faults(ctxt);
5402 if (rc != X86EMUL_CONTINUE)
5405 * Now that we know the fpu is exception safe, we can fetch
5408 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5409 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5410 if (!(ctxt->d & Mov))
5411 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5414 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5415 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5416 X86_ICPT_PRE_EXCEPT);
5417 if (rc != X86EMUL_CONTINUE)
5421 /* Instruction can only be executed in protected mode */
5422 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5423 rc = emulate_ud(ctxt);
5427 /* Privileged instruction can be executed only in CPL=0 */
5428 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5429 if (ctxt->d & PrivUD)
5430 rc = emulate_ud(ctxt);
5432 rc = emulate_gp(ctxt, 0);
5436 /* Do instruction specific permission checks */
5437 if (ctxt->d & CheckPerm) {
5438 rc = ctxt->check_perm(ctxt);
5439 if (rc != X86EMUL_CONTINUE)
5443 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5444 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5445 X86_ICPT_POST_EXCEPT);
5446 if (rc != X86EMUL_CONTINUE)
5450 if (ctxt->rep_prefix && (ctxt->d & String)) {
5451 /* All REP prefixes have the same first termination condition */
5452 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5453 string_registers_quirk(ctxt);
5454 ctxt->eip = ctxt->_eip;
5455 ctxt->eflags &= ~X86_EFLAGS_RF;
5461 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5462 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5463 ctxt->src.valptr, ctxt->src.bytes);
5464 if (rc != X86EMUL_CONTINUE)
5466 ctxt->src.orig_val64 = ctxt->src.val64;
5469 if (ctxt->src2.type == OP_MEM) {
5470 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5471 &ctxt->src2.val, ctxt->src2.bytes);
5472 if (rc != X86EMUL_CONTINUE)
5476 if ((ctxt->d & DstMask) == ImplicitOps)
5480 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5481 /* optimisation - avoid slow emulated read if Mov */
5482 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5483 &ctxt->dst.val, ctxt->dst.bytes);
5484 if (rc != X86EMUL_CONTINUE) {
5485 if (!(ctxt->d & NoWrite) &&
5486 rc == X86EMUL_PROPAGATE_FAULT &&
5487 ctxt->exception.vector == PF_VECTOR)
5488 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5492 /* Copy full 64-bit value for CMPXCHG8B. */
5493 ctxt->dst.orig_val64 = ctxt->dst.val64;
5497 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5498 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5499 X86_ICPT_POST_MEMACCESS);
5500 if (rc != X86EMUL_CONTINUE)
5504 if (ctxt->rep_prefix && (ctxt->d & String))
5505 ctxt->eflags |= X86_EFLAGS_RF;
5507 ctxt->eflags &= ~X86_EFLAGS_RF;
5509 if (ctxt->execute) {
5510 if (ctxt->d & Fastop) {
5511 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5512 rc = fastop(ctxt, fop);
5513 if (rc != X86EMUL_CONTINUE)
5517 rc = ctxt->execute(ctxt);
5518 if (rc != X86EMUL_CONTINUE)
5523 if (ctxt->opcode_len == 2)
5525 else if (ctxt->opcode_len == 3)
5526 goto threebyte_insn;
5529 case 0x70 ... 0x7f: /* jcc (short) */
5530 if (test_cc(ctxt->b, ctxt->eflags))
5531 rc = jmp_rel(ctxt, ctxt->src.val);
5533 case 0x8d: /* lea r16/r32, m */
5534 ctxt->dst.val = ctxt->src.addr.mem.ea;
5536 case 0x90 ... 0x97: /* nop / xchg reg, rax */
5537 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5538 ctxt->dst.type = OP_NONE;
5542 case 0x98: /* cbw/cwde/cdqe */
5543 switch (ctxt->op_bytes) {
5544 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5545 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5546 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5549 case 0xcc: /* int3 */
5550 rc = emulate_int(ctxt, 3);
5552 case 0xcd: /* int n */
5553 rc = emulate_int(ctxt, ctxt->src.val);
5555 case 0xce: /* into */
5556 if (ctxt->eflags & X86_EFLAGS_OF)
5557 rc = emulate_int(ctxt, 4);
5559 case 0xe9: /* jmp rel */
5560 case 0xeb: /* jmp rel short */
5561 rc = jmp_rel(ctxt, ctxt->src.val);
5562 ctxt->dst.type = OP_NONE; /* Disable writeback. */
5564 case 0xf4: /* hlt */
5565 ctxt->ops->halt(ctxt);
5567 case 0xf5: /* cmc */
5568 /* complement carry flag from eflags reg */
5569 ctxt->eflags ^= X86_EFLAGS_CF;
5571 case 0xf8: /* clc */
5572 ctxt->eflags &= ~X86_EFLAGS_CF;
5574 case 0xf9: /* stc */
5575 ctxt->eflags |= X86_EFLAGS_CF;
5577 case 0xfc: /* cld */
5578 ctxt->eflags &= ~X86_EFLAGS_DF;
5580 case 0xfd: /* std */
5581 ctxt->eflags |= X86_EFLAGS_DF;
5584 goto cannot_emulate;
5587 if (rc != X86EMUL_CONTINUE)
5591 if (ctxt->d & SrcWrite) {
5592 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5593 rc = writeback(ctxt, &ctxt->src);
5594 if (rc != X86EMUL_CONTINUE)
5597 if (!(ctxt->d & NoWrite)) {
5598 rc = writeback(ctxt, &ctxt->dst);
5599 if (rc != X86EMUL_CONTINUE)
5604 * restore dst type in case the decoding will be reused
5605 * (happens for string instruction )
5607 ctxt->dst.type = saved_dst_type;
5609 if ((ctxt->d & SrcMask) == SrcSI)
5610 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5612 if ((ctxt->d & DstMask) == DstDI)
5613 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5615 if (ctxt->rep_prefix && (ctxt->d & String)) {
5617 struct read_cache *r = &ctxt->io_read;
5618 if ((ctxt->d & SrcMask) == SrcSI)
5619 count = ctxt->src.count;
5621 count = ctxt->dst.count;
5622 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5624 if (!string_insn_completed(ctxt)) {
5626 * Re-enter guest when pio read ahead buffer is empty
5627 * or, if it is not used, after each 1024 iteration.
5629 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5630 (r->end == 0 || r->end != r->pos)) {
5632 * Reset read cache. Usually happens before
5633 * decode, but since instruction is restarted
5634 * we have to do it here.
5636 ctxt->mem_read.end = 0;
5637 writeback_registers(ctxt);
5638 return EMULATION_RESTART;
5640 goto done; /* skip rip writeback */
5642 ctxt->eflags &= ~X86_EFLAGS_RF;
5645 ctxt->eip = ctxt->_eip;
5648 if (rc == X86EMUL_PROPAGATE_FAULT) {
5649 WARN_ON(ctxt->exception.vector > 0x1f);
5650 ctxt->have_exception = true;
5652 if (rc == X86EMUL_INTERCEPTED)
5653 return EMULATION_INTERCEPTED;
5655 if (rc == X86EMUL_CONTINUE)
5656 writeback_registers(ctxt);
5658 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5662 case 0x09: /* wbinvd */
5663 (ctxt->ops->wbinvd)(ctxt);
5665 case 0x08: /* invd */
5666 case 0x0d: /* GrpP (prefetch) */
5667 case 0x18: /* Grp16 (prefetch/nop) */
5668 case 0x1f: /* nop */
5670 case 0x20: /* mov cr, reg */
5671 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5673 case 0x21: /* mov from dr to reg */
5674 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5676 case 0x40 ... 0x4f: /* cmov */
5677 if (test_cc(ctxt->b, ctxt->eflags))
5678 ctxt->dst.val = ctxt->src.val;
5679 else if (ctxt->op_bytes != 4)
5680 ctxt->dst.type = OP_NONE; /* no writeback */
5682 case 0x80 ... 0x8f: /* jnz rel, etc*/
5683 if (test_cc(ctxt->b, ctxt->eflags))
5684 rc = jmp_rel(ctxt, ctxt->src.val);
5686 case 0x90 ... 0x9f: /* setcc r/m8 */
5687 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5689 case 0xb6 ... 0xb7: /* movzx */
5690 ctxt->dst.bytes = ctxt->op_bytes;
5691 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5692 : (u16) ctxt->src.val;
5694 case 0xbe ... 0xbf: /* movsx */
5695 ctxt->dst.bytes = ctxt->op_bytes;
5696 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5697 (s16) ctxt->src.val;
5700 goto cannot_emulate;
5705 if (rc != X86EMUL_CONTINUE)
5711 return EMULATION_FAILED;
5714 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5716 invalidate_registers(ctxt);
5719 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5721 writeback_registers(ctxt);
5724 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5726 if (ctxt->rep_prefix && (ctxt->d & String))
5729 if (ctxt->d & TwoMemOp)