KVM: x86: Unify pr_fmt to use module name for all KVM modules
arch/x86/kvm/emulate.c (linux-block.git)

// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#include <asm/ibt.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

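/*
 * Worked example (annotation, not in the original source): a table entry
 * flagged DstReg | SrcMem | ModRM packs OpReg (2) into bits 1-5 and
 * OpMem (3) into bits 6-10 of opcode::flags, i.e.
 * (2 << 1) | (3 << 6) | (1 << 19) == 0x800c4, so the decoder routes the
 * destination through the ModRM reg field and the source through the
 * decoded memory operand.
 */
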
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

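/*
 * Worked example (annotation): X8(x) expands to eight copies of x, so a
 * hypothetical table row like X8(D(DstReg)) fills eight consecutive opcode
 * slots of a register family, e.g. 0x40-0x47 (inc r16/r32), in one line.
 */
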
struct opcode {
	u64 flags;
	u8 intercept;
	u8 pad[7];
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dirty = ctxt->regs_dirty;
	unsigned reg;

	for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
 * and 1 for the straight line speculation INT3, leaves 7 bytes for the
 * body of the function.  Currently none is larger than 4.
 */
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);

#define FASTOP_SIZE	16

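/*
 * Worked example (annotation): each FASTOP* group below emits its size
 * variants at em_<op> + 0, 16, 32 and 48 for 1-, 2-, 4- and 8-byte
 * operands, so the fastop() dispatcher (defined later in the file) can
 * reach the right body by adding __ffs(operand bytes) * FASTOP_SIZE to
 * the em_<op> entry point instead of indexing a jump table.
 */
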
#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t" \
	ASM_ENDBR \
	IBT_NOSEAL(name)

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"11: " ASM_RET \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define __FOP_START(op, align) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(align) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op,  dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	FOP_FUNC(op) \
	#op " %al \n\t" \
	FOP_RET(op)

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 * Using asm goto would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
		     : [_fault] "+r"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

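/*
 * Usage sketch (annotation): the FXSAVE/FXRSTOR emulation later in this
 * file wraps the faulting candidate as, e.g.,
 * rc = asm_safe("fxsave %[fx]", : [fx] "+m"(fx_state));
 * a fault at label 1 is redirected to label 2 by the extable entry,
 * _fault is set, and the expression yields X86EMUL_UNHANDLEABLE instead
 * of an unhandled fault.
 */
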
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

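/*
 * Worked example (annotation): for a 16-bit stack segment (ss.d == 0),
 * stack_mask() is 0xffff, so stack_size() is (__fls(0xffff) + 1) >> 3
 * == (15 + 1) >> 3 == 2 bytes; a 32-bit segment yields 4, and 64-bit
 * mode (mask ~0UL) yields 8.
 */
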
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt))
		return X86EMUL_UNHANDLEABLE;

	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

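/*
 * Worked example (annotation): a 16-byte MOVDQA access (flagged Aligned)
 * returns 16 here, so __linearize() below injects #GP unless the linear
 * address is 16-byte aligned; MOVDQU (Unaligned) returns 1 and is never
 * rejected on alignment grounds.
 */
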
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (!__is_canonical_address(la, va_bits))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
		     || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

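/*
 * Worked example (annotation): for an expand-down data segment
 * (type & 8 clear, type & 4 set) with scaled limit 0x1000 and desc.d == 1,
 * offsets 0x0..0x1000 take the "goto bad" path and only
 * 0x1001..0xffffffff linearize successfully; the valid range sits *above*
 * the limit, inverting the usual "offset <= limit" check.
 */
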
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;
	struct desc_struct cs;
	u16 selector;
	u32 base3;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
		/* Real mode. cpu must not have long mode active */
		if (efer & EFER_LMA)
			return X86EMUL_UNHANDLEABLE;
		ctxt->mode = X86EMUL_MODE_REAL;
		return X86EMUL_CONTINUE;
	}

	if (ctxt->eflags & X86_EFLAGS_VM) {
		/* Protected/VM86 mode. cpu must not have long mode active */
		if (efer & EFER_LMA)
			return X86EMUL_UNHANDLEABLE;
		ctxt->mode = X86EMUL_MODE_VM86;
		return X86EMUL_CONTINUE;
	}

	if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
		return X86EMUL_UNHANDLEABLE;

	if (efer & EFER_LMA) {
		if (cs.l) {
			/* Proper long mode */
			ctxt->mode = X86EMUL_MODE_PROT64;
		} else if (cs.d) {
			/* 32 bit compatibility mode */
			ctxt->mode = X86EMUL_MODE_PROT32;
		} else {
			ctxt->mode = X86EMUL_MODE_PROT16;
		}
	} else {
		/* Legacy 32 bit / 16 bit mode */
		ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	}

	return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	int rc = emulator_recalc_and_set_mode(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip(ctxt, dst);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})

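/*
 * Usage example (annotation): decode_modrm() below reads an 8-bit signed
 * displacement with "modrm_ea += insn_fetch(s8, ctxt)"; the macro tops up
 * the fetch cache, jumps to the caller's local "done:" label on failure,
 * and otherwise advances _eip and evaluates to the fetched value.
 */
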
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

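/*
 * Worked example (annotation): with no REX prefix and a byte operand,
 * modrm_reg == 7 decodes as BH, i.e. byte 1 of the RBX slot (7 & 3 == 3);
 * with any REX prefix present the same encoding takes the plain reg_rmw()
 * path and decodes as DIL, the low byte of RDI.
 */
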
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}

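/*
 * Worked example (annotation): condition code 0x4 ("equal/zero") indexes
 * the fifth FOP_SETCC() stub above, so test_cc(0x4, flags) calls
 * em_setcc + 4 * FASTOP_SIZE, which runs "setz %al" against the supplied
 * flags and returns AL as the result.
 */
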
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fninit");
	kvm_fpu_put();
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fnstcw %0": "+m"(fcw));
	kvm_fpu_put();

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fnstsw %0": "+m"(fsw));
	kvm_fpu_put();

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned int reg;

	if (ctxt->d & ModRM)
		reg = ctxt->modrm_reg;
	else
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		kvm_read_sse_reg(reg, &op->vec_val);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

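/*
 * Worked example (annotation): for "bt %eax, (mem)" with a 4-byte
 * destination and a bit offset of 68 in EAX, mask is ~31 and sv is 64,
 * so the memory effective address advances by 64 >> 3 == 8 bytes and
 * src.val is reduced to 68 & 31 == 4, the bit tested within the new dword.
 */
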
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt))
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

7f3d35fd
KW
1459static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1460 u16 index, struct desc_struct *desc)
1461{
1462 struct desc_ptr dt;
1463 ulong addr;
1464
1465 ctxt->ops->get_idt(ctxt, &dt);
1466
1467 if (dt.size < index * 8 + 7)
1468 return emulate_gp(ctxt, index << 3 | 0x2);
1469
1470 addr = dt.address + index * 8;
0e96f31e 1471 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
7f3d35fd
KW
1472}
1473
dde7e6d1 1474static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
dde7e6d1
AK
1475 u16 selector, struct desc_ptr *dt)
1476{
0225fb50 1477 const struct x86_emulate_ops *ops = ctxt->ops;
2eedcac8 1478 u32 base3 = 0;
7b105ca2 1479
dde7e6d1
AK
1480 if (selector & 1 << 2) {
1481 struct desc_struct desc;
1aa36616
AK
1482 u16 sel;
1483
0e96f31e 1484 memset(dt, 0, sizeof(*dt));
2eedcac8
NA
1485 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1486 VCPU_SREG_LDTR))
dde7e6d1 1487 return;
e09d082c 1488
dde7e6d1 1489 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
2eedcac8 1490 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
dde7e6d1 1491 } else
4bff1e86 1492 ops->get_gdt(ctxt, dt);
dde7e6d1 1493}
120df890 1494
edccda7c
NA
1495static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1496 u16 selector, ulong *desc_addr_p)
dde7e6d1
AK
1497{
1498 struct desc_ptr dt;
1499 u16 index = selector >> 3;
dde7e6d1 1500 ulong addr;
120df890 1501
7b105ca2 1502 get_descriptor_table_ptr(ctxt, selector, &dt);
120df890 1503
35d3d4a1
AK
1504 if (dt.size < index * 8 + 7)
1505 return emulate_gp(ctxt, selector & 0xfffc);
e09d082c 1506
edccda7c
NA
1507 addr = dt.address + index * 8;
1508
1509#ifdef CONFIG_X86_64
1510 if (addr >> 32 != 0) {
1511 u64 efer = 0;
1512
1513 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1514 if (!(efer & EFER_LMA))
1515 addr &= (u32)-1;
1516 }
1517#endif
1518
1519 *desc_addr_p = addr;
1520 return X86EMUL_CONTINUE;
1521}
1522
1523/* allowed just for 8 bytes segments */
1524static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1525 u16 selector, struct desc_struct *desc,
1526 ulong *desc_addr_p)
1527{
1528 int rc;
1529
1530 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1531 if (rc != X86EMUL_CONTINUE)
1532 return rc;
1533
79367a65 1534 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
dde7e6d1 1535}
ef65c889 1536
dde7e6d1
AK
1537/* allowed just for 8 bytes segments */
1538static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
dde7e6d1
AK
1539 u16 selector, struct desc_struct *desc)
1540{
edccda7c 1541 int rc;
dde7e6d1 1542 ulong addr;
6aa8b732 1543
edccda7c
NA
1544 rc = get_descriptor_ptr(ctxt, selector, &addr);
1545 if (rc != X86EMUL_CONTINUE)
1546 return rc;
6aa8b732 1547
0e96f31e 1548 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
dde7e6d1 1549}
c7e75a3d 1550
2356aaeb 1551static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
d1442d85 1552 u16 selector, int seg, u8 cpl,
3dc4bc4f 1553 enum x86_transfer_type transfer,
d1442d85 1554 struct desc_struct *desc)
dde7e6d1 1555{
869be99c 1556 struct desc_struct seg_desc, old_desc;
2356aaeb 1557 u8 dpl, rpl;
dde7e6d1
AK
1558 unsigned err_vec = GP_VECTOR;
1559 u32 err_code = 0;
1560 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
e919464b 1561 ulong desc_addr;
dde7e6d1 1562 int ret;
03ebebeb 1563 u16 dummy;
e37a75a1 1564 u32 base3 = 0;
69f55cb1 1565
0e96f31e 1566 memset(&seg_desc, 0, sizeof(seg_desc));
69f55cb1 1567
f8da94e9
KW
1568 if (ctxt->mode == X86EMUL_MODE_REAL) {
1569 /* set real mode segment descriptor (keep limit etc. for
1570 * unreal mode) */
03ebebeb 1571 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
dde7e6d1 1572 set_desc_base(&seg_desc, selector << 4);
dde7e6d1 1573 goto load;
f8da94e9
KW
1574 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1575 /* VM86 needs a clean new segment descriptor */
1576 set_desc_base(&seg_desc, selector << 4);
1577 set_desc_limit(&seg_desc, 0xffff);
1578 seg_desc.type = 3;
1579 seg_desc.p = 1;
1580 seg_desc.s = 1;
1581 seg_desc.dpl = 3;
1582 goto load;
dde7e6d1
AK
1583 }
1584
79d5b4c3 1585 rpl = selector & 3;
79d5b4c3 1586
dde7e6d1
AK
1587 /* TR should be in GDT only */
1588 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1589 goto exception;
1590
33ab9110
PB
1591 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1592 if (null_selector) {
1593 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1594 goto exception;
1595
1596 if (seg == VCPU_SREG_SS) {
1597 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1598 goto exception;
1599
1600 /*
1601 * ctxt->ops->set_segment expects the CPL to be in
1602 * SS.DPL, so fake an expand-up 32-bit data segment.
1603 */
1604 seg_desc.type = 3;
1605 seg_desc.p = 1;
1606 seg_desc.s = 1;
1607 seg_desc.dpl = cpl;
1608 seg_desc.d = 1;
1609 seg_desc.g = 1;
1610 }
1611
1612 /* Skip all following checks */
dde7e6d1 1613 goto load;
33ab9110 1614 }
dde7e6d1 1615
e919464b 1616 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
dde7e6d1
AK
1617 if (ret != X86EMUL_CONTINUE)
1618 return ret;
1619
1620 err_code = selector & 0xfffc;
3dc4bc4f
NA
1621 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1622 GP_VECTOR;
dde7e6d1 1623
fc058680 1624 /* can't load system descriptor into segment selector */
3dc4bc4f
NA
1625 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1626 if (transfer == X86_TRANSFER_CALL_JMP)
1627 return X86EMUL_UNHANDLEABLE;
dde7e6d1 1628 goto exception;
3dc4bc4f 1629 }
dde7e6d1 1630
dde7e6d1 1631 dpl = seg_desc.dpl;
dde7e6d1
AK
1632
1633 switch (seg) {
1634 case VCPU_SREG_SS:
1635 /*
1636 * segment is not a writable data segment or segment
1637 * selector's RPL != CPL or segment selector's RPL != CPL
1638 */
1639 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1640 goto exception;
6aa8b732 1641 break;
dde7e6d1
AK
1642 case VCPU_SREG_CS:
1643 if (!(seg_desc.type & 8))
1644 goto exception;
1645
1e326ad4
HW
1646 if (transfer == X86_TRANSFER_RET) {
1647 /* RET can never return to an inner privilege level. */
1648 if (rpl < cpl)
dde7e6d1 1649 goto exception;
1e326ad4
HW
1650 /* Outer-privilege level return is not implemented */
1651 if (rpl > cpl)
1652 return X86EMUL_UNHANDLEABLE;
1653 }
31c66dab
HW
1654 if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
1655 if (seg_desc.type & 4) {
1656 /* conforming */
1657 if (dpl > rpl)
1658 goto exception;
1659 } else {
1660 /* nonconforming */
1661 if (dpl != rpl)
1662 goto exception;
1663 }
1664 } else { /* X86_TRANSFER_CALL_JMP */
1665 if (seg_desc.type & 4) {
1666 /* conforming */
1667 if (dpl > cpl)
1668 goto exception;
1669 } else {
1670 /* nonconforming */
1671 if (rpl > cpl || dpl != cpl)
1672 goto exception;
1673 }
dde7e6d1 1674 }
040c8dc8
NA
1675 /* in long-mode d/b must be clear if l is set */
1676 if (seg_desc.d && seg_desc.l) {
1677 u64 efer = 0;
1678
1679 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1680 if (efer & EFER_LMA)
1681 goto exception;
1682 }
1683
dde7e6d1
AK
1684 /* CS(RPL) <- CPL */
1685 selector = (selector & 0xfffc) | cpl;
6aa8b732 1686 break;
dde7e6d1
AK
1687 case VCPU_SREG_TR:
1688 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1689 goto exception;
1690 break;
1691 case VCPU_SREG_LDTR:
1692 if (seg_desc.s || seg_desc.type != 2)
1693 goto exception;
1694 break;
1695 default: /* DS, ES, FS, or GS */
4e62417b 1696 /*
dde7e6d1
AK
1697 * segment is not a data or readable code segment or
1698 * ((segment is a data or nonconforming code segment)
1699 * and (both RPL and CPL > DPL))
4e62417b 1700 */
dde7e6d1
AK
1701 if ((seg_desc.type & 0xa) == 0x8 ||
1702 (((seg_desc.type & 0xc) != 0xc) &&
1703 (rpl > dpl && cpl > dpl)))
1704 goto exception;
6aa8b732 1705 break;
dde7e6d1
AK
1706 }
1707
ca85f002
HW
1708 if (!seg_desc.p) {
1709 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1710 goto exception;
1711 }
1712
dde7e6d1
AK
1713 if (seg_desc.s) {
1714 /* mark segment as accessed */
e2cefa74
NA
1715 if (!(seg_desc.type & 1)) {
1716 seg_desc.type |= 1;
1717 ret = write_segment_descriptor(ctxt, selector,
1718 &seg_desc);
1719 if (ret != X86EMUL_CONTINUE)
1720 return ret;
1721 }
e37a75a1 1722 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
79367a65 1723 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
e37a75a1
NA
1724 if (ret != X86EMUL_CONTINUE)
1725 return ret;
fd8cb433 1726 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
26262069
SC
1727 ((u64)base3 << 32), ctxt))
1728 return emulate_gp(ctxt, err_code);
dde7e6d1 1729 }
ec6e4d86
SC
1730
1731 if (seg == VCPU_SREG_TR) {
1732 old_desc = seg_desc;
1733 seg_desc.type |= 2; /* busy */
1734 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1735 sizeof(seg_desc), &ctxt->exception);
1736 if (ret != X86EMUL_CONTINUE)
1737 return ret;
dde7e6d1
AK
1738 }
1739load:
e37a75a1 1740 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
d1442d85
NA
1741 if (desc)
1742 *desc = seg_desc;
dde7e6d1
AK
1743 return X86EMUL_CONTINUE;
1744exception:
592f0858 1745 return emulate_exception(ctxt, err_vec, err_code, true);
dde7e6d1
AK
1746}
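/*
 * Editor's illustration (not part of emulate.c): the CS privilege checks
 * above can be summarized by the hypothetical helper below, assuming
 * "conforming" is bit 2 of the descriptor type. RET additionally rejects
 * rpl < cpl before this table applies.
 */
static inline bool cs_load_allowed(bool conforming, u8 rpl, u8 dpl, u8 cpl,
				   bool ret_or_task_switch)
{
	if (ret_or_task_switch)
		/* privilege is taken from the new RPL */
		return conforming ? dpl <= rpl : dpl == rpl;

	/* CALL/JMP: privilege is taken from the current CPL */
	return conforming ? dpl <= cpl : (rpl <= cpl && dpl == cpl);
}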
1747
2356aaeb
PB
1748static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1749 u16 selector, int seg)
1750{
1751 u8 cpl = ctxt->ops->cpl(ctxt);
33ab9110
PB
1752
1753 /*
1754 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1755 * they can load it at CPL<3 (Intel's manual says only LSS can,
1756 * but it's wrong).
1757 *
1758 * However, the Intel manual says that putting IST=1/DPL=3 in
1759 * an interrupt gate will result in SS=3 (the AMD manual instead
1760 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1761 * and only forbid it here.
1762 */
1763 if (seg == VCPU_SREG_SS && selector == 3 &&
1764 ctxt->mode == X86EMUL_MODE_PROT64)
1765 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1766
3dc4bc4f
NA
1767 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1768 X86_TRANSFER_NONE, NULL);
2356aaeb
PB
1769}
1770
31be40b3
WY
1771static void write_register_operand(struct operand *op)
1772{
6fd8e127 1773 return assign_register(op->addr.reg, op->val, op->bytes);
31be40b3
WY
1774}
1775
fb32b1ed 1776static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
dde7e6d1 1777{
fb32b1ed 1778 switch (op->type) {
dde7e6d1 1779 case OP_REG:
fb32b1ed 1780 write_register_operand(op);
6aa8b732 1781 break;
dde7e6d1 1782 case OP_MEM:
9dac77fa 1783 if (ctxt->lock_prefix)
f5f87dfb
PB
1784 return segmented_cmpxchg(ctxt,
1785 op->addr.mem,
1786 &op->orig_val,
1787 &op->val,
1788 op->bytes);
1789 else
1790 return segmented_write(ctxt,
fb32b1ed 1791 op->addr.mem,
fb32b1ed
AK
1792 &op->val,
1793 op->bytes);
a682e354 1794 break;
b3356bf0 1795 case OP_MEM_STR:
f5f87dfb
PB
1796 return segmented_write(ctxt,
1797 op->addr.mem,
1798 op->data,
1799 op->bytes * op->count);
b3356bf0 1800 break;
1253791d 1801 case OP_XMM:
43e51464 1802 kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1253791d 1803 break;
cbe2c9d3 1804 case OP_MM:
43e51464 1805 kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
cbe2c9d3 1806 break;
dde7e6d1
AK
1807 case OP_NONE:
1808 /* no writeback */
414e6277 1809 break;
dde7e6d1 1810 default:
414e6277 1811 break;
6aa8b732 1812 }
dde7e6d1
AK
1813 return X86EMUL_CONTINUE;
1814}
6aa8b732 1815
51ddff50 1816static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
dde7e6d1 1817{
4179bb02 1818 struct segmented_address addr;
0dc8d10f 1819
5ad105e5 1820 rsp_increment(ctxt, -bytes);
dd856efa 1821 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
4179bb02
TY
1822 addr.seg = VCPU_SREG_SS;
1823
51ddff50
AK
1824 return segmented_write(ctxt, addr, data, bytes);
1825}
1826
1827static int em_push(struct x86_emulate_ctxt *ctxt)
1828{
4179bb02 1829 /* Disable writeback. */
9dac77fa 1830 ctxt->dst.type = OP_NONE;
51ddff50 1831 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
dde7e6d1 1832}
69f55cb1 1833
dde7e6d1 1834static int emulate_pop(struct x86_emulate_ctxt *ctxt,
dde7e6d1
AK
1835 void *dest, int len)
1836{
dde7e6d1 1837 int rc;
90de84f5 1838 struct segmented_address addr;
8b4caf66 1839
dd856efa 1840 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
90de84f5 1841 addr.seg = VCPU_SREG_SS;
3ca3ac4d 1842 rc = segmented_read(ctxt, addr, dest, len);
dde7e6d1
AK
1843 if (rc != X86EMUL_CONTINUE)
1844 return rc;
1845
5ad105e5 1846 rsp_increment(ctxt, len);
dde7e6d1 1847 return rc;
8b4caf66
LV
1848}
1849
c54fe504
TY
1850static int em_pop(struct x86_emulate_ctxt *ctxt)
1851{
9dac77fa 1852 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
c54fe504
TY
1853}
1854
dde7e6d1 1855static int emulate_popf(struct x86_emulate_ctxt *ctxt,
7b105ca2 1856 void *dest, int len)
9de41573
GN
1857{
1858 int rc;
dde7e6d1 1859 unsigned long val, change_mask;
0efb0440 1860 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
7b105ca2 1861 int cpl = ctxt->ops->cpl(ctxt);
9de41573 1862
3b9be3bf 1863 rc = emulate_pop(ctxt, &val, len);
dde7e6d1
AK
1864 if (rc != X86EMUL_CONTINUE)
1865 return rc;
9de41573 1866
0efb0440
NA
1867 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1868 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1869 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1870 X86_EFLAGS_AC | X86_EFLAGS_ID;
9de41573 1871
dde7e6d1
AK
1872 switch(ctxt->mode) {
1873 case X86EMUL_MODE_PROT64:
1874 case X86EMUL_MODE_PROT32:
1875 case X86EMUL_MODE_PROT16:
1876 if (cpl == 0)
0efb0440 1877 change_mask |= X86_EFLAGS_IOPL;
dde7e6d1 1878 if (cpl <= iopl)
0efb0440 1879 change_mask |= X86_EFLAGS_IF;
dde7e6d1
AK
1880 break;
1881 case X86EMUL_MODE_VM86:
35d3d4a1
AK
1882 if (iopl < 3)
1883 return emulate_gp(ctxt, 0);
0efb0440 1884 change_mask |= X86_EFLAGS_IF;
dde7e6d1
AK
1885 break;
1886 default: /* real mode */
0efb0440 1887 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
dde7e6d1 1888 break;
9de41573 1889 }
dde7e6d1
AK
1890
1891 *(unsigned long *)dest =
1892 (ctxt->eflags & ~change_mask) | (val & change_mask);
1893
1894 return rc;
9de41573
GN
1895}
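/*
 * Editor's illustration (not part of emulate.c): POPF only updates the
 * EFLAGS bits selected by change_mask; the merge above is equivalent to
 * the hypothetical helper below. E.g. at CPL 3 with IOPL 0, a popped
 * value with IF clear leaves IF untouched, since IF is not in the mask.
 */
static inline unsigned long popf_merge(unsigned long old_eflags,
				       unsigned long popped,
				       unsigned long change_mask)
{
	return (old_eflags & ~change_mask) | (popped & change_mask);
}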
1896
62aaa2f0
TY
1897static int em_popf(struct x86_emulate_ctxt *ctxt)
1898{
9dac77fa
AK
1899 ctxt->dst.type = OP_REG;
1900 ctxt->dst.addr.reg = &ctxt->eflags;
1901 ctxt->dst.bytes = ctxt->op_bytes;
1902 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
62aaa2f0
TY
1903}
1904
612e89f0
AK
1905static int em_enter(struct x86_emulate_ctxt *ctxt)
1906{
1907 int rc;
1908 unsigned frame_size = ctxt->src.val;
1909 unsigned nesting_level = ctxt->src2.val & 31;
dd856efa 1910 ulong rbp;
612e89f0
AK
1911
1912 if (nesting_level)
1913 return X86EMUL_UNHANDLEABLE;
1914
dd856efa
AK
1915 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1916 rc = push(ctxt, &rbp, stack_size(ctxt));
612e89f0
AK
1917 if (rc != X86EMUL_CONTINUE)
1918 return rc;
dd856efa 1919 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
612e89f0 1920 stack_mask(ctxt));
dd856efa
AK
1921 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1922 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
612e89f0
AK
1923 stack_mask(ctxt));
1924 return X86EMUL_CONTINUE;
1925}
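/*
 * Editor's note (not part of emulate.c): for nesting level 0, ENTER as
 * emulated above is equivalent to the sequence
 *
 *	push %rbp
 *	mov  %rsp, %rbp
 *	sub  $frame_size, %rsp
 *
 * masked to the current stack width. Nonzero nesting levels would also
 * copy outer frame pointers, which the emulator deliberately punts on.
 */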
1926
f47cfa31
AK
1927static int em_leave(struct x86_emulate_ctxt *ctxt)
1928{
dd856efa 1929 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
f47cfa31 1930 stack_mask(ctxt));
dd856efa 1931 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
f47cfa31
AK
1932}
1933
1cd196ea 1934static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
7b262e90 1935{
1cd196ea
AK
1936 int seg = ctxt->src2.val;
1937
9dac77fa 1938 ctxt->src.val = get_segment_selector(ctxt, seg);
0fcc207c
NA
1939 if (ctxt->op_bytes == 4) {
1940 rsp_increment(ctxt, -2);
1941 ctxt->op_bytes = 2;
1942 }
7b262e90 1943
4487b3b4 1944 return em_push(ctxt);
7b262e90
GN
1945}
1946
1cd196ea 1947static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
38ba30ba 1948{
1cd196ea 1949 int seg = ctxt->src2.val;
dde7e6d1
AK
1950 unsigned long selector;
1951 int rc;
38ba30ba 1952
3313bc4e 1953 rc = emulate_pop(ctxt, &selector, 2);
dde7e6d1
AK
1954 if (rc != X86EMUL_CONTINUE)
1955 return rc;
1956
6aa5c47c 1957 if (seg == VCPU_SREG_SS)
a5457e7b 1958 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3313bc4e
NA
1959 if (ctxt->op_bytes > 2)
1960 rsp_increment(ctxt, ctxt->op_bytes - 2);
a5457e7b 1961
7b105ca2 1962 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
dde7e6d1 1963 return rc;
38ba30ba
GN
1964}
1965
b96a7fad 1966static int em_pusha(struct x86_emulate_ctxt *ctxt)
38ba30ba 1967{
dd856efa 1968 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
dde7e6d1
AK
1969 int rc = X86EMUL_CONTINUE;
1970 int reg = VCPU_REGS_RAX;
38ba30ba 1971
dde7e6d1
AK
1972 while (reg <= VCPU_REGS_RDI) {
1973 (reg == VCPU_REGS_RSP) ?
dd856efa 1974 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
38ba30ba 1975
4487b3b4 1976 rc = em_push(ctxt);
dde7e6d1
AK
1977 if (rc != X86EMUL_CONTINUE)
1978 return rc;
38ba30ba 1979
dde7e6d1 1980 ++reg;
38ba30ba 1981 }
38ba30ba 1982
dde7e6d1 1983 return rc;
38ba30ba
GN
1984}
1985
62aaa2f0
TY
1986static int em_pushf(struct x86_emulate_ctxt *ctxt)
1987{
0efb0440 1988 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
62aaa2f0
TY
1989 return em_push(ctxt);
1990}
1991
b96a7fad 1992static int em_popa(struct x86_emulate_ctxt *ctxt)
38ba30ba 1993{
dde7e6d1
AK
1994 int rc = X86EMUL_CONTINUE;
1995 int reg = VCPU_REGS_RDI;
6fd8e127 1996 u32 val;
38ba30ba 1997
dde7e6d1
AK
1998 while (reg >= VCPU_REGS_RAX) {
1999 if (reg == VCPU_REGS_RSP) {
5ad105e5 2000 rsp_increment(ctxt, ctxt->op_bytes);
dde7e6d1
AK
2001 --reg;
2002 }
38ba30ba 2003
6fd8e127 2004 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
dde7e6d1
AK
2005 if (rc != X86EMUL_CONTINUE)
2006 break;
6fd8e127 2007 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
dde7e6d1 2008 --reg;
38ba30ba 2009 }
dde7e6d1 2010 return rc;
38ba30ba
GN
2011}
2012
dd856efa 2013static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
6e154e56 2014{
0225fb50 2015 const struct x86_emulate_ops *ops = ctxt->ops;
5c56e1cf 2016 int rc;
6e154e56
MG
2017 struct desc_ptr dt;
2018 gva_t cs_addr;
2019 gva_t eip_addr;
2020 u16 cs, eip;
6e154e56
MG
2021
2022 /* TODO: Add limit checks */
9dac77fa 2023 ctxt->src.val = ctxt->eflags;
4487b3b4 2024 rc = em_push(ctxt);
5c56e1cf
AK
2025 if (rc != X86EMUL_CONTINUE)
2026 return rc;
6e154e56 2027
0efb0440 2028 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
6e154e56 2029
9dac77fa 2030 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
4487b3b4 2031 rc = em_push(ctxt);
5c56e1cf
AK
2032 if (rc != X86EMUL_CONTINUE)
2033 return rc;
6e154e56 2034
9dac77fa 2035 ctxt->src.val = ctxt->_eip;
4487b3b4 2036 rc = em_push(ctxt);
5c56e1cf
AK
2037 if (rc != X86EMUL_CONTINUE)
2038 return rc;
2039
4bff1e86 2040 ops->get_idt(ctxt, &dt);
6e154e56
MG
2041
2042 eip_addr = dt.address + (irq << 2);
2043 cs_addr = dt.address + (irq << 2) + 2;
2044
79367a65 2045 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
6e154e56
MG
2046 if (rc != X86EMUL_CONTINUE)
2047 return rc;
2048
79367a65 2049 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
6e154e56
MG
2050 if (rc != X86EMUL_CONTINUE)
2051 return rc;
2052
7b105ca2 2053 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
6e154e56
MG
2054 if (rc != X86EMUL_CONTINUE)
2055 return rc;
2056
9dac77fa 2057 ctxt->_eip = eip;
6e154e56
MG
2058
2059 return rc;
2060}
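/*
 * Editor's note (not part of emulate.c): in real mode the IVT entry for
 * vector N occupies 4 bytes at IDT base + N*4, the new IP first, then the
 * new CS. E.g. for irq 0x10 with an IDT base of 0, eip_addr above is
 * 0x40 and cs_addr is 0x42.
 */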
2061
dd856efa
AK
2062int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2063{
2064 int rc;
2065
2066 invalidate_registers(ctxt);
2067 rc = __emulate_int_real(ctxt, irq);
2068 if (rc == X86EMUL_CONTINUE)
2069 writeback_registers(ctxt);
2070 return rc;
2071}
2072
7b105ca2 2073static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
6e154e56
MG
2074{
2075 switch(ctxt->mode) {
2076 case X86EMUL_MODE_REAL:
dd856efa 2077 return __emulate_int_real(ctxt, irq);
6e154e56
MG
2078 case X86EMUL_MODE_VM86:
2079 case X86EMUL_MODE_PROT16:
2080 case X86EMUL_MODE_PROT32:
2081 case X86EMUL_MODE_PROT64:
2082 default:
2083 /* Protected mode interrupts are not implemented yet */
2084 return X86EMUL_UNHANDLEABLE;
2085 }
2086}
2087
7b105ca2 2088static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
38ba30ba 2089{
dde7e6d1
AK
2090 int rc = X86EMUL_CONTINUE;
2091 unsigned long temp_eip = 0;
2092 unsigned long temp_eflags = 0;
2093 unsigned long cs = 0;
0efb0440
NA
2094 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2095 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2096 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2097 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2098 X86_EFLAGS_AC | X86_EFLAGS_ID |
35fd68a3 2099 X86_EFLAGS_FIXED;
0efb0440
NA
2100 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2101 X86_EFLAGS_VIP;
38ba30ba 2102
dde7e6d1 2103 /* TODO: Add stack limit check */
38ba30ba 2104
9dac77fa 2105 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
38ba30ba 2106
dde7e6d1
AK
2107 if (rc != X86EMUL_CONTINUE)
2108 return rc;
38ba30ba 2109
35d3d4a1
AK
2110 if (temp_eip & ~0xffff)
2111 return emulate_gp(ctxt, 0);
38ba30ba 2112
9dac77fa 2113 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
38ba30ba 2114
dde7e6d1
AK
2115 if (rc != X86EMUL_CONTINUE)
2116 return rc;
38ba30ba 2117
9dac77fa 2118 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
38ba30ba 2119
dde7e6d1
AK
2120 if (rc != X86EMUL_CONTINUE)
2121 return rc;
38ba30ba 2122
7b105ca2 2123 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
38ba30ba 2124
dde7e6d1
AK
2125 if (rc != X86EMUL_CONTINUE)
2126 return rc;
38ba30ba 2127
9dac77fa 2128 ctxt->_eip = temp_eip;
38ba30ba 2129
9dac77fa 2130 if (ctxt->op_bytes == 4)
dde7e6d1 2131 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
9dac77fa 2132 else if (ctxt->op_bytes == 2) {
dde7e6d1
AK
2133 ctxt->eflags &= ~0xffff;
2134 ctxt->eflags |= temp_eflags;
38ba30ba 2135 }
dde7e6d1
AK
2136
2137 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
35fd68a3 2138 ctxt->eflags |= X86_EFLAGS_FIXED;
801806d9 2139 ctxt->ops->set_nmi_mask(ctxt, false);
dde7e6d1
AK
2140
2141 return rc;
38ba30ba
GN
2142}
2143
e01991e7 2144static int em_iret(struct x86_emulate_ctxt *ctxt)
c37eda13 2145{
dde7e6d1
AK
2146 switch(ctxt->mode) {
2147 case X86EMUL_MODE_REAL:
7b105ca2 2148 return emulate_iret_real(ctxt);
dde7e6d1
AK
2149 case X86EMUL_MODE_VM86:
2150 case X86EMUL_MODE_PROT16:
2151 case X86EMUL_MODE_PROT32:
2152 case X86EMUL_MODE_PROT64:
c37eda13 2153 default:
dde7e6d1
AK
2154 /* IRET from protected mode is not implemented yet */
2155 return X86EMUL_UNHANDLEABLE;
c37eda13 2156 }
c37eda13
WY
2157}
2158
d2f62766
TY
2159static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2160{
d2f62766 2161 int rc;
2117d539
RK
2162 unsigned short sel;
2163 struct desc_struct new_desc;
d1442d85
NA
2164 u8 cpl = ctxt->ops->cpl(ctxt);
2165
9dac77fa 2166 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
d2f62766 2167
3dc4bc4f
NA
2168 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2169 X86_TRANSFER_CALL_JMP,
d1442d85 2170 &new_desc);
d2f62766
TY
2171 if (rc != X86EMUL_CONTINUE)
2172 return rc;
2173
d087e0f7 2174 rc = assign_eip_far(ctxt, ctxt->src.val);
2117d539
RK
2175 /* Error handling is not implemented. */
2176 if (rc != X86EMUL_CONTINUE)
2177 return X86EMUL_UNHANDLEABLE;
2178
d1442d85 2179 return rc;
d2f62766
TY
2180}
2181
f7784046 2182static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
8cdbd2c9 2183{
f7784046
NA
2184 return assign_eip_near(ctxt, ctxt->src.val);
2185}
8cdbd2c9 2186
f7784046
NA
2187static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2188{
2189 int rc;
2190 long int old_eip;
2191
2192 old_eip = ctxt->_eip;
2193 rc = assign_eip_near(ctxt, ctxt->src.val);
2194 if (rc != X86EMUL_CONTINUE)
2195 return rc;
2196 ctxt->src.val = old_eip;
2197 rc = em_push(ctxt);
4179bb02 2198 return rc;
8cdbd2c9
LV
2199}
2200
e0dac408 2201static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
8cdbd2c9 2202{
9dac77fa 2203 u64 old = ctxt->dst.orig_val64;
8cdbd2c9 2204
aaa05f24
NA
2205 if (ctxt->dst.bytes == 16)
2206 return X86EMUL_UNHANDLEABLE;
2207
dd856efa
AK
2208 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2209 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2210 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2211 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
0efb0440 2212 ctxt->eflags &= ~X86_EFLAGS_ZF;
8cdbd2c9 2213 } else {
dd856efa
AK
2214 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2215 (u32) reg_read(ctxt, VCPU_REGS_RBX);
8cdbd2c9 2216
0efb0440 2217 ctxt->eflags |= X86_EFLAGS_ZF;
8cdbd2c9 2218 }
1b30eaa8 2219 return X86EMUL_CONTINUE;
8cdbd2c9
LV
2220}
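/*
 * Editor's illustration (not part of emulate.c): a hypothetical
 * standalone model of the CMPXCHG8B semantics implemented above.
 */
static inline bool cmpxchg8b_model(u64 *dest, u32 *eax, u32 *edx,
				   u32 ebx, u32 ecx)
{
	u64 expected = ((u64)*edx << 32) | *eax;

	if (*dest == expected) {
		*dest = ((u64)ecx << 32) | ebx;	/* store ECX:EBX, ZF = 1 */
		return true;
	}
	*eax = (u32)*dest;			/* load old value, ZF = 0 */
	*edx = (u32)(*dest >> 32);
	return false;
}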
2221
ebda02c2
TY
2222static int em_ret(struct x86_emulate_ctxt *ctxt)
2223{
234f3ce4
NA
2224 int rc;
2225 unsigned long eip;
2226
2227 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2228 if (rc != X86EMUL_CONTINUE)
2229 return rc;
2230
2231 return assign_eip_near(ctxt, eip);
ebda02c2
TY
2232}
2233
e01991e7 2234static int em_ret_far(struct x86_emulate_ctxt *ctxt)
a77ab5ea 2235{
a77ab5ea 2236 int rc;
d1442d85 2237 unsigned long eip, cs;
9e8919ae 2238 int cpl = ctxt->ops->cpl(ctxt);
2117d539 2239 struct desc_struct new_desc;
a77ab5ea 2240
d1442d85 2241 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
1b30eaa8 2242 if (rc != X86EMUL_CONTINUE)
a77ab5ea 2243 return rc;
9dac77fa 2244 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1b30eaa8 2245 if (rc != X86EMUL_CONTINUE)
a77ab5ea 2246 return rc;
3dc4bc4f
NA
2247 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2248 X86_TRANSFER_RET,
d1442d85
NA
2249 &new_desc);
2250 if (rc != X86EMUL_CONTINUE)
2251 return rc;
d087e0f7 2252 rc = assign_eip_far(ctxt, eip);
2117d539
RK
2253 /* Error handling is not implemented. */
2254 if (rc != X86EMUL_CONTINUE)
2255 return X86EMUL_UNHANDLEABLE;
2256
a77ab5ea
AK
2257 return rc;
2258}
2259
3261107e
BR
2260static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2261{
2262 int rc;
2263
2264 rc = em_ret_far(ctxt);
2265 if (rc != X86EMUL_CONTINUE)
2266 return rc;
2267 rsp_increment(ctxt, ctxt->src.val);
2268 return X86EMUL_CONTINUE;
2269}
2270
e940b5c2
TY
2271static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2272{
2273 /* Save real source value, then compare EAX against destination. */
37c564f2
NA
2274 ctxt->dst.orig_val = ctxt->dst.val;
2275 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
e940b5c2 2276 ctxt->src.orig_val = ctxt->src.val;
37c564f2 2277 ctxt->src.val = ctxt->dst.orig_val;
158de57f 2278 fastop(ctxt, em_cmp);
e940b5c2 2279
0efb0440 2280 if (ctxt->eflags & X86_EFLAGS_ZF) {
2fcf5c8a
NA
2281 /* Success: write back to memory; no update of EAX */
2282 ctxt->src.type = OP_NONE;
e940b5c2
TY
2283 ctxt->dst.val = ctxt->src.orig_val;
2284 } else {
2285 /* Failure: write the value we saw to EAX. */
2fcf5c8a
NA
2286 ctxt->src.type = OP_REG;
2287 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2288 ctxt->src.val = ctxt->dst.orig_val;
2289 /* Create a write cycle to dest by writing back the same value */
37c564f2 2290 ctxt->dst.val = ctxt->dst.orig_val;
e940b5c2
TY
2291 }
2292 return X86EMUL_CONTINUE;
2293}
2294
d4b4325f 2295static int em_lseg(struct x86_emulate_ctxt *ctxt)
09b5f4d3 2296{
d4b4325f 2297 int seg = ctxt->src2.val;
09b5f4d3
WY
2298 unsigned short sel;
2299 int rc;
2300
9dac77fa 2301 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
09b5f4d3 2302
7b105ca2 2303 rc = load_segment_descriptor(ctxt, sel, seg);
09b5f4d3
WY
2304 if (rc != X86EMUL_CONTINUE)
2305 return rc;
2306
9dac77fa 2307 ctxt->dst.val = ctxt->src.val;
09b5f4d3
WY
2308 return rc;
2309}
2310
64d60670
PB
2311static int em_rsm(struct x86_emulate_ctxt *ctxt)
2312{
6ed071f0 2313 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
64d60670
PB
2314 return emulate_ud(ctxt);
2315
f1554150
PB
2316 if (ctxt->ops->leave_smm(ctxt))
2317 ctxt->ops->triple_fault(ctxt);
8f4dc2e7 2318
055f37f8 2319 return emulator_recalc_and_set_mode(ctxt);
64d60670
PB
2320}
2321
7b105ca2 2322static void
09d9423d 2323setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
e66bb2cc 2324{
e66bb2cc 2325 cs->l = 0; /* will be adjusted later */
79168fd1 2326 set_desc_base(cs, 0); /* flat segment */
e66bb2cc 2327 cs->g = 1; /* 4kb granularity */
79168fd1 2328 set_desc_limit(cs, 0xfffff); /* 4GB limit */
e66bb2cc
AP
2329 cs->type = 0x0b; /* Read, Execute, Accessed */
2330 cs->s = 1;
2331 cs->dpl = 0; /* will be adjusted later */
79168fd1
GN
2332 cs->p = 1;
2333 cs->d = 1;
99245b50 2334 cs->avl = 0;
e66bb2cc 2335
79168fd1
GN
2336 set_desc_base(ss, 0); /* flat segment */
2337 set_desc_limit(ss, 0xfffff); /* 4GB limit */
e66bb2cc
AP
2338 ss->g = 1; /* 4kb granularity */
2339 ss->s = 1;
2340 ss->type = 0x03; /* Read/Write, Accessed */
79168fd1 2341 ss->d = 1; /* 32bit stack segment */
e66bb2cc 2342 ss->dpl = 0;
79168fd1 2343 ss->p = 1;
99245b50
GN
2344 ss->l = 0;
2345 ss->avl = 0;
e66bb2cc
AP
2346}
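/*
 * Editor's note (not part of emulate.c): encoded as raw 8-byte GDT
 * entries, the flat segments built above would read (if I decode the
 * fields correctly) 0x00cf9b000000ffff for CS and 0x00cf93000000ffff for
 * SS: base 0, limit 0xfffff with G=1 (4 GiB), P=1, S=1, D=1.
 */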
2347
1a18a69b
AK
2348static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2349{
2350 u32 eax, ebx, ecx, edx;
2351
2352 eax = ecx = 0;
f91af517 2353 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
15608ed0 2354 return is_guest_vendor_intel(ebx, ecx, edx);
1a18a69b
AK
2355}
2356
c2226fc9
SB
2357static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2358{
0225fb50 2359 const struct x86_emulate_ops *ops = ctxt->ops;
c2226fc9
SB
2360 u32 eax, ebx, ecx, edx;
2361
2362 /*
2363 * syscall should always be enabled in long mode, so the check only
2364 * becomes vendor-specific (via cpuid) if other modes are active...
2365 */
2366 if (ctxt->mode == X86EMUL_MODE_PROT64)
2367 return true;
2368
2369 eax = 0x00000000;
2370 ecx = 0x00000000;
f91af517 2371 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
0017f93a 2372 /*
15608ed0
SC
2373 * remark: Intel CPUs only support "syscall" in 64-bit long mode. A
2374 * 32-bit compat-app running in a 64-bit guest will therefore #UD!
2375 * While this behaviour could be fixed by emulating the AMD response,
2376 * AMD CPUs can't be made to behave like Intel's.
0017f93a 2377 */
15608ed0 2378 if (is_guest_vendor_intel(ebx, ecx, edx))
0017f93a
AK
2379 return false;
2380
15608ed0
SC
2381 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2382 is_guest_vendor_hygon(ebx, ecx, edx))
b8f4abb6
PW
2383 return true;
2384
2385 /*
2386 * default: (not Intel, not AMD, not Hygon), apply Intel's
2387 * stricter rules...
2388 */
c2226fc9
SB
2389 return false;
2390}
2391
e01991e7 2392static int em_syscall(struct x86_emulate_ctxt *ctxt)
e66bb2cc 2393{
0225fb50 2394 const struct x86_emulate_ops *ops = ctxt->ops;
79168fd1 2395 struct desc_struct cs, ss;
e66bb2cc 2396 u64 msr_data;
79168fd1 2397 u16 cs_sel, ss_sel;
c2ad2bb3 2398 u64 efer = 0;
e66bb2cc
AP
2399
2400 /* syscall is not available in real mode */
2e901c4c 2401 if (ctxt->mode == X86EMUL_MODE_REAL ||
35d3d4a1
AK
2402 ctxt->mode == X86EMUL_MODE_VM86)
2403 return emulate_ud(ctxt);
e66bb2cc 2404
c2226fc9
SB
2405 if (!(em_syscall_is_enabled(ctxt)))
2406 return emulate_ud(ctxt);
2407
c2ad2bb3 2408 ops->get_msr(ctxt, MSR_EFER, &efer);
c2226fc9
SB
2409 if (!(efer & EFER_SCE))
2410 return emulate_ud(ctxt);
2411
09d9423d 2412 setup_syscalls_segments(&cs, &ss);
717746e3 2413 ops->get_msr(ctxt, MSR_STAR, &msr_data);
e66bb2cc 2414 msr_data >>= 32;
79168fd1
GN
2415 cs_sel = (u16)(msr_data & 0xfffc);
2416 ss_sel = (u16)(msr_data + 8);
e66bb2cc 2417
c2ad2bb3 2418 if (efer & EFER_LMA) {
79168fd1 2419 cs.d = 0;
e66bb2cc
AP
2420 cs.l = 1;
2421 }
1aa36616
AK
2422 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2423 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
e66bb2cc 2424
dd856efa 2425 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
c2ad2bb3 2426 if (efer & EFER_LMA) {
e66bb2cc 2427#ifdef CONFIG_X86_64
6c6cb69b 2428 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
e66bb2cc 2429
717746e3 2430 ops->get_msr(ctxt,
3fb1b5db
GN
2431 ctxt->mode == X86EMUL_MODE_PROT64 ?
2432 MSR_LSTAR : MSR_CSTAR, &msr_data);
9dac77fa 2433 ctxt->_eip = msr_data;
e66bb2cc 2434
717746e3 2435 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
6c6cb69b 2436 ctxt->eflags &= ~msr_data;
35fd68a3 2437 ctxt->eflags |= X86_EFLAGS_FIXED;
e66bb2cc
AP
2438#endif
2439 } else {
2440 /* legacy mode */
717746e3 2441 ops->get_msr(ctxt, MSR_STAR, &msr_data);
9dac77fa 2442 ctxt->_eip = (u32)msr_data;
e66bb2cc 2443
0efb0440 2444 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
e66bb2cc
AP
2445 }
2446
c8401dda 2447 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
e54cfa97 2448 return X86EMUL_CONTINUE;
e66bb2cc
AP
2449}
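/*
 * Editor's note (not part of emulate.c): SYSCALL takes its selectors from
 * MSR_STAR[63:32]. With a hypothetical STAR of 0x0023001000000000,
 * msr_data >> 32 is 0x00230010, so cs_sel = 0x0010 and ss_sel = 0x0018.
 * The target RIP comes from MSR_LSTAR (64-bit mode) or MSR_CSTAR (compat
 * mode), and MSR_SYSCALL_MASK selects the RFLAGS bits to clear.
 */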
2450
e01991e7 2451static int em_sysenter(struct x86_emulate_ctxt *ctxt)
8c604352 2452{
0225fb50 2453 const struct x86_emulate_ops *ops = ctxt->ops;
79168fd1 2454 struct desc_struct cs, ss;
8c604352 2455 u64 msr_data;
79168fd1 2456 u16 cs_sel, ss_sel;
c2ad2bb3 2457 u64 efer = 0;
8c604352 2458
7b105ca2 2459 ops->get_msr(ctxt, MSR_EFER, &efer);
a0044755 2460 /* inject #GP if in real mode */
35d3d4a1
AK
2461 if (ctxt->mode == X86EMUL_MODE_REAL)
2462 return emulate_gp(ctxt, 0);
8c604352 2463
1a18a69b
AK
2464 /*
2465 * Not recognized on AMD in compat mode (but is recognized in legacy
2466 * mode).
2467 */
f3747379 2468 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
1a18a69b
AK
2469 && !vendor_intel(ctxt))
2470 return emulate_ud(ctxt);
2471
b2c9d43e 2472 /* sysenter/sysexit have not been tested in 64bit mode. */
35d3d4a1 2473 if (ctxt->mode == X86EMUL_MODE_PROT64)
b2c9d43e 2474 return X86EMUL_UNHANDLEABLE;
8c604352 2475
717746e3 2476 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
f3747379
NA
2477 if ((msr_data & 0xfffc) == 0x0)
2478 return emulate_gp(ctxt, 0);
8c604352 2479
09d9423d 2480 setup_syscalls_segments(&cs, &ss);
0efb0440 2481 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
b32a9918 2482 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
79168fd1 2483 ss_sel = cs_sel + 8;
f3747379 2484 if (efer & EFER_LMA) {
79168fd1 2485 cs.d = 0;
8c604352
AP
2486 cs.l = 1;
2487 }
2488
1aa36616
AK
2489 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2490 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
8c604352 2491
717746e3 2492 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
f3747379 2493 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
8c604352 2494
717746e3 2495 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
f3747379
NA
2496 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2497 (u32)msr_data;
943dea8a
SC
2498 if (efer & EFER_LMA)
2499 ctxt->mode = X86EMUL_MODE_PROT64;
8c604352 2500
e54cfa97 2501 return X86EMUL_CONTINUE;
8c604352
AP
2502}
2503
e01991e7 2504static int em_sysexit(struct x86_emulate_ctxt *ctxt)
4668f050 2505{
0225fb50 2506 const struct x86_emulate_ops *ops = ctxt->ops;
79168fd1 2507 struct desc_struct cs, ss;
234f3ce4 2508 u64 msr_data, rcx, rdx;
4668f050 2509 int usermode;
1249b96e 2510 u16 cs_sel = 0, ss_sel = 0;
4668f050 2511
a0044755
GN
2512 /* inject #GP if in real mode or Virtual 8086 mode */
2513 if (ctxt->mode == X86EMUL_MODE_REAL ||
35d3d4a1
AK
2514 ctxt->mode == X86EMUL_MODE_VM86)
2515 return emulate_gp(ctxt, 0);
4668f050 2516
09d9423d 2517 setup_syscalls_segments(&cs, &ss);
4668f050 2518
9dac77fa 2519 if ((ctxt->rex_prefix & 0x8) != 0x0)
4668f050
AP
2520 usermode = X86EMUL_MODE_PROT64;
2521 else
2522 usermode = X86EMUL_MODE_PROT32;
2523
234f3ce4
NA
2524 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2525 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2526
4668f050
AP
2527 cs.dpl = 3;
2528 ss.dpl = 3;
717746e3 2529 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
4668f050
AP
2530 switch (usermode) {
2531 case X86EMUL_MODE_PROT32:
79168fd1 2532 cs_sel = (u16)(msr_data + 16);
35d3d4a1
AK
2533 if ((msr_data & 0xfffc) == 0x0)
2534 return emulate_gp(ctxt, 0);
79168fd1 2535 ss_sel = (u16)(msr_data + 24);
bf0b682c
NA
2536 rcx = (u32)rcx;
2537 rdx = (u32)rdx;
4668f050
AP
2538 break;
2539 case X86EMUL_MODE_PROT64:
79168fd1 2540 cs_sel = (u16)(msr_data + 32);
35d3d4a1
AK
2541 if (msr_data == 0x0)
2542 return emulate_gp(ctxt, 0);
79168fd1
GN
2543 ss_sel = cs_sel + 8;
2544 cs.d = 0;
4668f050 2545 cs.l = 1;
fd8cb433
YZ
2546 if (emul_is_noncanonical_address(rcx, ctxt) ||
2547 emul_is_noncanonical_address(rdx, ctxt))
234f3ce4 2548 return emulate_gp(ctxt, 0);
4668f050
AP
2549 break;
2550 }
b32a9918
NA
2551 cs_sel |= SEGMENT_RPL_MASK;
2552 ss_sel |= SEGMENT_RPL_MASK;
4668f050 2553
1aa36616
AK
2554 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2555 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
4668f050 2556
234f3ce4 2557 ctxt->_eip = rdx;
5015bb89 2558 ctxt->mode = usermode;
234f3ce4 2559 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
4668f050 2560
e54cfa97 2561 return X86EMUL_CONTINUE;
4668f050
AP
2562}
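/*
 * Editor's note (not part of emulate.c): SYSEXIT's selector arithmetic
 * above, with a hypothetical IA32_SYSENTER_CS of 0x0010:
 *
 *	32-bit exit: cs_sel = 0x10 + 16 = 0x20, ss_sel = 0x10 + 24 = 0x28
 *	64-bit exit: cs_sel = 0x10 + 32 = 0x30, ss_sel = cs_sel + 8 = 0x38
 *
 * Both selectors are then OR'ed with RPL 3 for user mode.
 */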
2563
7b105ca2 2564static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
f850e2e6
GN
2565{
2566 int iopl;
2567 if (ctxt->mode == X86EMUL_MODE_REAL)
2568 return false;
2569 if (ctxt->mode == X86EMUL_MODE_VM86)
2570 return true;
0efb0440 2571 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
7b105ca2 2572 return ctxt->ops->cpl(ctxt) > iopl;
f850e2e6
GN
2573}
2574
9a29d449
LA
2575#define VMWARE_PORT_VMPORT (0x5658)
2576#define VMWARE_PORT_VMRPC (0x5659)
2577
f850e2e6 2578static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
f850e2e6
GN
2579 u16 port, u16 len)
2580{
0225fb50 2581 const struct x86_emulate_ops *ops = ctxt->ops;
79168fd1 2582 struct desc_struct tr_seg;
5601d05b 2583 u32 base3;
f850e2e6 2584 int r;
1aa36616 2585 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
f850e2e6 2586 unsigned mask = (1 << len) - 1;
5601d05b 2587 unsigned long base;
f850e2e6 2588
9a29d449
LA
2589 /*
2590 * VMware allows access to these ports even if denied
2591 * by the TSS I/O permission bitmap. Mimic that behavior.
2592 */
2593 if (enable_vmware_backdoor &&
2594 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2595 return true;
2596
1aa36616 2597 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
79168fd1 2598 if (!tr_seg.p)
f850e2e6 2599 return false;
79168fd1 2600 if (desc_limit_scaled(&tr_seg) < 103)
f850e2e6 2601 return false;
5601d05b
GN
2602 base = get_desc_base(&tr_seg);
2603#ifdef CONFIG_X86_64
2604 base |= ((u64)base3) << 32;
2605#endif
3c9fa24c 2606 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
f850e2e6
GN
2607 if (r != X86EMUL_CONTINUE)
2608 return false;
79168fd1 2609 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
f850e2e6 2610 return false;
3c9fa24c 2611 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
f850e2e6
GN
2612 if (r != X86EMUL_CONTINUE)
2613 return false;
2614 if ((perm >> bit_idx) & mask)
2615 return false;
2616 return true;
2617}
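/*
 * Editor's illustration (not part of emulate.c): the I/O bitmap test
 * above reduced to its arithmetic, as a hypothetical helper that assumes
 * the bitmap (including the customary trailing byte) is already in
 * memory. A 2-byte access to port 0x3f9 checks bits 1-2 of byte 0x7f.
 */
static inline bool io_bitmap_allows(const u8 *bitmap, u16 port, u16 len)
{
	u16 perm = bitmap[port / 8] | (bitmap[port / 8 + 1] << 8);
	u16 mask = (1 << len) - 1;

	return ((perm >> (port & 0x7)) & mask) == 0;
}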
2618
2619static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
f850e2e6
GN
2620 u16 port, u16 len)
2621{
4fc40f07
GN
2622 if (ctxt->perm_ok)
2623 return true;
2624
7b105ca2
TY
2625 if (emulator_bad_iopl(ctxt))
2626 if (!emulator_io_port_access_allowed(ctxt, port, len))
f850e2e6 2627 return false;
4fc40f07
GN
2628
2629 ctxt->perm_ok = true;
2630
f850e2e6
GN
2631 return true;
2632}
2633
428e3d08
NA
2634static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2635{
2636 /*
2637 * Intel CPUs mask the counter and pointers in a rather strange
2638 * manner when ECX is zero, due to REP-string optimizations.
2639 */
2640#ifdef CONFIG_X86_64
2641 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2642 return;
2643
2644 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2645
2646 switch (ctxt->b) {
2647 case 0xa4: /* movsb */
2648 case 0xa5: /* movsd/w */
2649 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
df561f66 2650 fallthrough;
428e3d08
NA
2651 case 0xaa: /* stosb */
2652 case 0xab: /* stosd/w */
2653 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2654 }
2655#endif
2656}
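/*
 * Editor's note (not part of emulate.c): e.g. a "rep movsb" with 32-bit
 * address size and ECX = 0 still truncates RSI and RDI to their low 32
 * bits on Intel hardware; the quirk above makes the emulator leave the
 * registers the way the CPU would have.
 */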
2657
38ba30ba 2658static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
38ba30ba
GN
2659 struct tss_segment_16 *tss)
2660{
9dac77fa 2661 tss->ip = ctxt->_eip;
38ba30ba 2662 tss->flag = ctxt->eflags;
dd856efa
AK
2663 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2664 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2665 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2666 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2667 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2668 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2669 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2670 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
38ba30ba 2671
1aa36616
AK
2672 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2673 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2674 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2675 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2676 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
38ba30ba
GN
2677}
2678
2679static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
38ba30ba
GN
2680 struct tss_segment_16 *tss)
2681{
38ba30ba 2682 int ret;
2356aaeb 2683 u8 cpl;
38ba30ba 2684
9dac77fa 2685 ctxt->_eip = tss->ip;
38ba30ba 2686 ctxt->eflags = tss->flag | 2;
dd856efa
AK
2687 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2688 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2689 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2690 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2691 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2692 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2693 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2694 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
38ba30ba
GN
2695
2696 /*
2697 * SDM says that segment selectors are loaded before segment
2698 * descriptors
2699 */
1aa36616
AK
2700 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2701 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2702 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2703 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2704 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
38ba30ba 2705
2356aaeb
PB
2706 cpl = tss->cs & 3;
2707
38ba30ba 2708 /*
fc058680 2709 * Now load segment descriptors. If a fault happens at this stage,
38ba30ba
GN
2710 * it is handled in the context of the new task.
2711 */
d1442d85 2712 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3dc4bc4f 2713 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2714 if (ret != X86EMUL_CONTINUE)
2715 return ret;
d1442d85 2716 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3dc4bc4f 2717 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2718 if (ret != X86EMUL_CONTINUE)
2719 return ret;
d1442d85 2720 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3dc4bc4f 2721 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2722 if (ret != X86EMUL_CONTINUE)
2723 return ret;
d1442d85 2724 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3dc4bc4f 2725 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2726 if (ret != X86EMUL_CONTINUE)
2727 return ret;
d1442d85 2728 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3dc4bc4f 2729 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2730 if (ret != X86EMUL_CONTINUE)
2731 return ret;
2732
2733 return X86EMUL_CONTINUE;
2734}
2735
7127fd36 2736static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
38ba30ba
GN
2737 ulong old_tss_base, struct desc_struct *new_desc)
2738{
2739 struct tss_segment_16 tss_seg;
2740 int ret;
bcc55cba 2741 u32 new_tss_base = get_desc_base(new_desc);
38ba30ba 2742
0e96f31e 2743 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 2744 if (ret != X86EMUL_CONTINUE)
38ba30ba 2745 return ret;
38ba30ba 2746
7b105ca2 2747 save_state_to_tss16(ctxt, &tss_seg);
38ba30ba 2748
0e96f31e 2749 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 2750 if (ret != X86EMUL_CONTINUE)
38ba30ba 2751 return ret;
38ba30ba 2752
0e96f31e 2753 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 2754 if (ret != X86EMUL_CONTINUE)
38ba30ba 2755 return ret;
38ba30ba
GN
2756
2757 if (old_tss_sel != 0xffff) {
2758 tss_seg.prev_task_link = old_tss_sel;
2759
79367a65
PB
2760 ret = linear_write_system(ctxt, new_tss_base,
2761 &tss_seg.prev_task_link,
0e96f31e 2762 sizeof(tss_seg.prev_task_link));
db297e3d 2763 if (ret != X86EMUL_CONTINUE)
38ba30ba 2764 return ret;
38ba30ba
GN
2765 }
2766
7b105ca2 2767 return load_state_from_tss16(ctxt, &tss_seg);
38ba30ba
GN
2768}
2769
2770static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
38ba30ba
GN
2771 struct tss_segment_32 *tss)
2772{
5c7411e2 2773 /* CR3 and the LDT selector are intentionally not saved */
9dac77fa 2774 tss->eip = ctxt->_eip;
38ba30ba 2775 tss->eflags = ctxt->eflags;
dd856efa
AK
2776 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2777 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2778 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2779 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2780 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2781 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2782 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2783 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
38ba30ba 2784
1aa36616
AK
2785 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2786 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2787 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2788 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2789 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2790 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
38ba30ba
GN
2791}
2792
2793static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
38ba30ba
GN
2794 struct tss_segment_32 *tss)
2795{
38ba30ba 2796 int ret;
2356aaeb 2797 u8 cpl;
38ba30ba 2798
7b105ca2 2799 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
35d3d4a1 2800 return emulate_gp(ctxt, 0);
9dac77fa 2801 ctxt->_eip = tss->eip;
38ba30ba 2802 ctxt->eflags = tss->eflags | 2;
4cee4798
KW
2803
2804 /* General purpose registers */
dd856efa
AK
2805 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2806 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2807 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2808 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2809 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2810 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2811 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2812 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
38ba30ba
GN
2813
2814 /*
2815 * SDM says that segment selectors are loaded before segment
2356aaeb
PB
2816 * descriptors. This is important because CPL checks will
2817 * use CS.RPL.
38ba30ba 2818 */
1aa36616
AK
2819 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2820 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2821 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2822 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2823 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2824 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2825 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
38ba30ba 2826
4cee4798
KW
2827 /*
2828 * If we're switching between Protected Mode and VM86, we need to make
2829 * sure to update the mode before loading the segment descriptors so
2830 * that the selectors are interpreted correctly.
4cee4798 2831 */
2356aaeb 2832 if (ctxt->eflags & X86_EFLAGS_VM) {
4cee4798 2833 ctxt->mode = X86EMUL_MODE_VM86;
2356aaeb
PB
2834 cpl = 3;
2835 } else {
4cee4798 2836 ctxt->mode = X86EMUL_MODE_PROT32;
2356aaeb
PB
2837 cpl = tss->cs & 3;
2838 }
4cee4798 2839
38ba30ba 2840 /*
d9f6e12f 2841 * Now load segment descriptors. If a fault happens at this stage,
38ba30ba
GN
2842 * it is handled in the context of the new task.
2843 */
d1442d85 2844 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3dc4bc4f 2845 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2846 if (ret != X86EMUL_CONTINUE)
2847 return ret;
d1442d85 2848 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3dc4bc4f 2849 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2850 if (ret != X86EMUL_CONTINUE)
2851 return ret;
d1442d85 2852 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3dc4bc4f 2853 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2854 if (ret != X86EMUL_CONTINUE)
2855 return ret;
d1442d85 2856 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3dc4bc4f 2857 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2858 if (ret != X86EMUL_CONTINUE)
2859 return ret;
d1442d85 2860 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3dc4bc4f 2861 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2862 if (ret != X86EMUL_CONTINUE)
2863 return ret;
d1442d85 2864 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3dc4bc4f 2865 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
2866 if (ret != X86EMUL_CONTINUE)
2867 return ret;
d1442d85 2868 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3dc4bc4f 2869 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba 2870
2f729b10 2871 return ret;
38ba30ba
GN
2872}
2873
7127fd36 2874static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
38ba30ba
GN
2875 ulong old_tss_base, struct desc_struct *new_desc)
2876{
2877 struct tss_segment_32 tss_seg;
2878 int ret;
bcc55cba 2879 u32 new_tss_base = get_desc_base(new_desc);
5c7411e2
NA
2880 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2881 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
38ba30ba 2882
0e96f31e 2883 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 2884 if (ret != X86EMUL_CONTINUE)
38ba30ba 2885 return ret;
38ba30ba 2886
7b105ca2 2887 save_state_to_tss32(ctxt, &tss_seg);
38ba30ba 2888
5c7411e2 2889 /* Only GP registers and segment selectors are saved */
79367a65
PB
2890 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2891 ldt_sel_offset - eip_offset);
db297e3d 2892 if (ret != X86EMUL_CONTINUE)
38ba30ba 2893 return ret;
38ba30ba 2894
0e96f31e 2895 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 2896 if (ret != X86EMUL_CONTINUE)
38ba30ba 2897 return ret;
38ba30ba
GN
2898
2899 if (old_tss_sel != 0xffff) {
2900 tss_seg.prev_task_link = old_tss_sel;
2901
79367a65
PB
2902 ret = linear_write_system(ctxt, new_tss_base,
2903 &tss_seg.prev_task_link,
0e96f31e 2904 sizeof(tss_seg.prev_task_link));
db297e3d 2905 if (ret != X86EMUL_CONTINUE)
38ba30ba 2906 return ret;
38ba30ba
GN
2907 }
2908
7b105ca2 2909 return load_state_from_tss32(ctxt, &tss_seg);
38ba30ba
GN
2910}
2911
2912static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
7f3d35fd 2913 u16 tss_selector, int idt_index, int reason,
e269fb21 2914 bool has_error_code, u32 error_code)
38ba30ba 2915{
0225fb50 2916 const struct x86_emulate_ops *ops = ctxt->ops;
38ba30ba
GN
2917 struct desc_struct curr_tss_desc, next_tss_desc;
2918 int ret;
1aa36616 2919 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
38ba30ba 2920 ulong old_tss_base =
4bff1e86 2921 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
ceffb459 2922 u32 desc_limit;
3db176d5 2923 ulong desc_addr, dr7;
38ba30ba
GN
2924
2925 /* FIXME: old_tss_base == ~0 ? */
2926
e919464b 2927 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
38ba30ba
GN
2928 if (ret != X86EMUL_CONTINUE)
2929 return ret;
e919464b 2930 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
38ba30ba
GN
2931 if (ret != X86EMUL_CONTINUE)
2932 return ret;
2933
2934 /* FIXME: check that next_tss_desc is tss */
2935
7f3d35fd
KW
2936 /*
2937 * Check privileges. The three cases are task switch caused by...
2938 *
2939 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2940 * 2. Exception/IRQ/iret: No check is performed
2c2ca2d1
NA
2941 * 3. jmp/call to TSS/task-gate: No check is performed since the
2942 * hardware checks it before exiting.
7f3d35fd
KW
2943 */
2944 if (reason == TASK_SWITCH_GATE) {
2945 if (idt_index != -1) {
2946 /* Software interrupts */
2947 struct desc_struct task_gate_desc;
2948 int dpl;
2949
2950 ret = read_interrupt_descriptor(ctxt, idt_index,
2951 &task_gate_desc);
2952 if (ret != X86EMUL_CONTINUE)
2953 return ret;
2954
2955 dpl = task_gate_desc.dpl;
2956 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2957 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2958 }
38ba30ba
GN
2959 }
2960
ceffb459
GN
2961 desc_limit = desc_limit_scaled(&next_tss_desc);
2962 if (!next_tss_desc.p ||
2963 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2964 desc_limit < 0x2b)) {
592f0858 2965 return emulate_ts(ctxt, tss_selector & 0xfffc);
38ba30ba
GN
2966 }
2967
2968 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2969 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
7b105ca2 2970 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
38ba30ba
GN
2971 }
2972
2973 if (reason == TASK_SWITCH_IRET)
2974 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2975
2976 /* Set the back link to the previous task only if the NT bit is set
fc058680 2977 in eflags; note that old_tss_sel is not used after this point. */
38ba30ba
GN
2978 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2979 old_tss_sel = 0xffff;
2980
2981 if (next_tss_desc.type & 8)
7127fd36 2982 ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
38ba30ba 2983 else
7127fd36 2984 ret = task_switch_16(ctxt, old_tss_sel,
38ba30ba 2985 old_tss_base, &next_tss_desc);
0760d448
JK
2986 if (ret != X86EMUL_CONTINUE)
2987 return ret;
38ba30ba
GN
2988
2989 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2990 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2991
2992 if (reason != TASK_SWITCH_IRET) {
2993 next_tss_desc.type |= (1 << 1); /* set busy flag */
7b105ca2 2994 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
38ba30ba
GN
2995 }
2996
717746e3 2997 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
1aa36616 2998 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
38ba30ba 2999
e269fb21 3000 if (has_error_code) {
9dac77fa
AK
3001 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3002 ctxt->lock_prefix = 0;
3003 ctxt->src.val = (unsigned long) error_code;
4487b3b4 3004 ret = em_push(ctxt);
e269fb21
JK
3005 }
3006
3db176d5
NA
3007 ops->get_dr(ctxt, 7, &dr7);
3008 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3009
38ba30ba
GN
3010 return ret;
3011}
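/*
 * Editor's note (not part of emulate.c): the task-switch sequence above,
 * in order: read both TSS descriptors; check the task-gate DPL for
 * software interrupts; check the new TSS limit (#TS on failure); clear
 * the busy bit on JMP/IRET and the NT flag on IRET; save state to the old
 * TSS and load it from the new one (16- or 32-bit format); set NT on
 * CALL/GATE and the busy bit unless IRET; set CR0.TS; load TR; push the
 * error code if any; and finally clear the local-breakpoint bits in DR7.
 */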
3012
3013int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
7f3d35fd 3014 u16 tss_selector, int idt_index, int reason,
e269fb21 3015 bool has_error_code, u32 error_code)
38ba30ba 3016{
38ba30ba
GN
3017 int rc;
3018
dd856efa 3019 invalidate_registers(ctxt);
9dac77fa
AK
3020 ctxt->_eip = ctxt->eip;
3021 ctxt->dst.type = OP_NONE;
38ba30ba 3022
7f3d35fd 3023 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
e269fb21 3024 has_error_code, error_code);
38ba30ba 3025
dd856efa 3026 if (rc == X86EMUL_CONTINUE) {
9dac77fa 3027 ctxt->eip = ctxt->_eip;
dd856efa
AK
3028 writeback_registers(ctxt);
3029 }
38ba30ba 3030
a0c0ab2f 3031 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
38ba30ba
GN
3032}
3033
f3bd64c6
GN
3034static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3035 struct operand *op)
a682e354 3036{
0efb0440 3037 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
a682e354 3038
01485a22
PB
3039 register_address_increment(ctxt, reg, df * op->bytes);
3040 op->addr.mem.ea = register_address(ctxt, reg);
a682e354
GN
3041}
3042
7af04fc0
AK
3043static int em_das(struct x86_emulate_ctxt *ctxt)
3044{
7af04fc0
AK
3045 u8 al, old_al;
3046 bool af, cf, old_cf;
3047
3048 cf = ctxt->eflags & X86_EFLAGS_CF;
9dac77fa 3049 al = ctxt->dst.val;
7af04fc0
AK
3050
3051 old_al = al;
3052 old_cf = cf;
3053 cf = false;
3054 af = ctxt->eflags & X86_EFLAGS_AF;
3055 if ((al & 0x0f) > 9 || af) {
3056 al -= 6;
3057 cf = old_cf | (al >= 250);
3058 af = true;
3059 } else {
3060 af = false;
3061 }
3062 if (old_al > 0x99 || old_cf) {
3063 al -= 0x60;
3064 cf = true;
3065 }
3066
9dac77fa 3067 ctxt->dst.val = al;
7af04fc0 3068 /* Set PF, ZF, SF */
9dac77fa
AK
3069 ctxt->src.type = OP_IMM;
3070 ctxt->src.val = 0;
3071 ctxt->src.bytes = 1;
158de57f 3072 fastop(ctxt, em_or);
7af04fc0
AK
3073 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3074 if (cf)
3075 ctxt->eflags |= X86_EFLAGS_CF;
3076 if (af)
3077 ctxt->eflags |= X86_EFLAGS_AF;
3078 return X86EMUL_CONTINUE;
3079}
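/*
 * Editor's note (not part of emulate.c): worked DAS example. After the
 * packed-BCD subtraction 0x23 - 0x08, AL = 0x1b with AF set; DAS then
 * subtracts 6 (low nibble > 9 or AF set), giving AL = 0x15, the correct
 * BCD result of 23 - 8.
 */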
3080
a035d5c6
PB
3081static int em_aam(struct x86_emulate_ctxt *ctxt)
3082{
3083 u8 al, ah;
3084
3085 if (ctxt->src.val == 0)
3086 return emulate_de(ctxt);
3087
3088 al = ctxt->dst.val & 0xff;
3089 ah = al / ctxt->src.val;
3090 al %= ctxt->src.val;
3091
3092 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3093
3094 /* Set PF, ZF, SF */
3095 ctxt->src.type = OP_IMM;
3096 ctxt->src.val = 0;
3097 ctxt->src.bytes = 1;
3098 fastop(ctxt, em_or);
3099
3100 return X86EMUL_CONTINUE;
3101}
3102
7f662273
GN
3103static int em_aad(struct x86_emulate_ctxt *ctxt)
3104{
3105 u8 al = ctxt->dst.val & 0xff;
3106 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3107
3108 al = (al + (ah * ctxt->src.val)) & 0xff;
3109
3110 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3111
f583c29b
GN
3112 /* Set PF, ZF, SF */
3113 ctxt->src.type = OP_IMM;
3114 ctxt->src.val = 0;
3115 ctxt->src.bytes = 1;
3116 fastop(ctxt, em_or);
7f662273
GN
3117
3118 return X86EMUL_CONTINUE;
3119}
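/*
 * Editor's note (not part of emulate.c): with the default base of 10,
 * AAM and AAD are inverses over unpacked BCD:
 *
 *	AAM: AL = 79        ->  AH = 79 / 10 = 7, AL = 79 % 10 = 9
 *	AAD: AH = 7, AL = 9 ->  AL = 7 * 10 + 9 = 79, AH = 0
 */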
3120
d4ddafcd
TY
3121static int em_call(struct x86_emulate_ctxt *ctxt)
3122{
234f3ce4 3123 int rc;
d4ddafcd
TY
3124 long rel = ctxt->src.val;
3125
3126 ctxt->src.val = (unsigned long)ctxt->_eip;
234f3ce4
NA
3127 rc = jmp_rel(ctxt, rel);
3128 if (rc != X86EMUL_CONTINUE)
3129 return rc;
d4ddafcd
TY
3130 return em_push(ctxt);
3131}
3132
0ef753b8
AK
3133static int em_call_far(struct x86_emulate_ctxt *ctxt)
3134{
0ef753b8
AK
3135 u16 sel, old_cs;
3136 ulong old_eip;
3137 int rc;
d1442d85
NA
3138 struct desc_struct old_desc, new_desc;
3139 const struct x86_emulate_ops *ops = ctxt->ops;
3140 int cpl = ctxt->ops->cpl(ctxt);
82268083 3141 enum x86emul_mode prev_mode = ctxt->mode;
0ef753b8 3142
9dac77fa 3143 old_eip = ctxt->_eip;
d1442d85 3144 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
0ef753b8 3145
9dac77fa 3146 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3dc4bc4f
NA
3147 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3148 X86_TRANSFER_CALL_JMP, &new_desc);
d1442d85 3149 if (rc != X86EMUL_CONTINUE)
80976dbb 3150 return rc;
0ef753b8 3151
d087e0f7 3152 rc = assign_eip_far(ctxt, ctxt->src.val);
d1442d85
NA
3153 if (rc != X86EMUL_CONTINUE)
3154 goto fail;
0ef753b8 3155
9dac77fa 3156 ctxt->src.val = old_cs;
4487b3b4 3157 rc = em_push(ctxt);
0ef753b8 3158 if (rc != X86EMUL_CONTINUE)
d1442d85 3159 goto fail;
0ef753b8 3160
9dac77fa 3161 ctxt->src.val = old_eip;
d1442d85
NA
3162 rc = em_push(ctxt);
3163 /* If we failed, we tainted the memory, but the very least we should
3164 restore cs */
82268083
NA
3165 if (rc != X86EMUL_CONTINUE) {
3166 pr_warn_once("faulting far call emulation tainted memory\n");
d1442d85 3167 goto fail;
82268083 3168 }
d1442d85
NA
3169 return rc;
3170fail:
3171 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
82268083 3172 ctxt->mode = prev_mode;
d1442d85
NA
3173 return rc;
3174
0ef753b8
AK
3175}
3176
40ece7c7
AK
3177static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3178{
40ece7c7 3179 int rc;
234f3ce4 3180 unsigned long eip;
40ece7c7 3181
234f3ce4
NA
3182 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3183 if (rc != X86EMUL_CONTINUE)
3184 return rc;
3185 rc = assign_eip_near(ctxt, eip);
40ece7c7
AK
3186 if (rc != X86EMUL_CONTINUE)
3187 return rc;
5ad105e5 3188 rsp_increment(ctxt, ctxt->src.val);
40ece7c7
AK
3189 return X86EMUL_CONTINUE;
3190}
3191
e4f973ae
TY
3192static int em_xchg(struct x86_emulate_ctxt *ctxt)
3193{
e4f973ae 3194 /* Write back the register source. */
9dac77fa
AK
3195 ctxt->src.val = ctxt->dst.val;
3196 write_register_operand(&ctxt->src);
e4f973ae
TY
3197
3198 /* Write back the memory destination with implicit LOCK prefix. */
9dac77fa
AK
3199 ctxt->dst.val = ctxt->src.orig_val;
3200 ctxt->lock_prefix = 1;
e4f973ae
TY
3201 return X86EMUL_CONTINUE;
3202}
3203
5c82aa29
AK
3204static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3205{
9dac77fa 3206 ctxt->dst.val = ctxt->src2.val;
4d758349 3207 return fastop(ctxt, em_imul);
5c82aa29
AK
3208}
3209
61429142
AK
3210static int em_cwd(struct x86_emulate_ctxt *ctxt)
3211{
9dac77fa
AK
3212 ctxt->dst.type = OP_REG;
3213 ctxt->dst.bytes = ctxt->src.bytes;
dd856efa 3214 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
9dac77fa 3215 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
61429142
AK
3216
3217 return X86EMUL_CONTINUE;
3218}
3219
fb6d4d34
PB
3220static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3221{
3222 u64 tsc_aux = 0;
3223
a836839c 3224 if (!ctxt->ops->guest_has_rdpid(ctxt))
a9e2e0ae 3225 return emulate_ud(ctxt);
a836839c
HW
3226
3227 ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
fb6d4d34
PB
3228 ctxt->dst.val = tsc_aux;
3229 return X86EMUL_CONTINUE;
3230}
3231
48bb5d3c
AK
3232static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3233{
48bb5d3c
AK
3234 u64 tsc = 0;
3235
717746e3 3236 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
dd856efa
AK
3237 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3238 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
48bb5d3c
AK
3239 return X86EMUL_CONTINUE;
3240}
3241
222d21aa
AK
3242static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3243{
3244 u64 pmc;
3245
dd856efa 3246 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
222d21aa 3247 return emulate_gp(ctxt, 0);
dd856efa
AK
3248 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3249 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
222d21aa
AK
3250 return X86EMUL_CONTINUE;
3251}
3252
b9eac5f4
AK
3253static int em_mov(struct x86_emulate_ctxt *ctxt)
3254{
54cfdb3e 3255 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
b9eac5f4
AK
3256 return X86EMUL_CONTINUE;
3257}
3258
84cffe49
BP
3259static int em_movbe(struct x86_emulate_ctxt *ctxt)
3260{
84cffe49
BP
3261 u16 tmp;
3262
5ae78e95 3263 if (!ctxt->ops->guest_has_movbe(ctxt))
84cffe49
BP
3264 return emulate_ud(ctxt);
3265
3266 switch (ctxt->op_bytes) {
3267 case 2:
3268 /*
3269 * From MOVBE definition: "...When the operand size is 16 bits,
3270 * the upper word of the destination register remains unchanged
3271 * ..."
3272 *
3273 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3274 * rules, so we have to do the operation almost by hand.
3275 */
3276 tmp = (u16)ctxt->src.val;
3277 ctxt->dst.val &= ~0xffffUL;
3278 ctxt->dst.val |= (unsigned long)swab16(tmp);
3279 break;
3280 case 4:
3281 ctxt->dst.val = swab32((u32)ctxt->src.val);
3282 break;
3283 case 8:
3284 ctxt->dst.val = swab64(ctxt->src.val);
3285 break;
3286 default:
592f0858 3287 BUG();
84cffe49
BP
3288 }
3289 return X86EMUL_CONTINUE;
3290}
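/*
 * Editor's note (not part of emulate.c): MOVBE swaps byte order while
 * moving, e.g. the 4-byte form turns 0x12345678 into 0x78563412; the
 * 2-byte form swaps only the low word and, per the comment above, leaves
 * the destination's upper bytes unchanged.
 */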
3291
bc00f8d2
TY
3292static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3293{
ad8f9e69
ML
3294 int cr_num = ctxt->modrm_reg;
3295 int r;
3296
3297 if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
bc00f8d2
TY
3298 return emulate_gp(ctxt, 0);
3299
3300 /* Disable writeback. */
3301 ctxt->dst.type = OP_NONE;
ad8f9e69
ML
3302
3303 if (cr_num == 0) {
3304 /*
3305 * CR0 write might have updated CR0.PE and/or CR0.PG
3306 * which can affect the cpu's execution mode.
3307 */
3308 r = emulator_recalc_and_set_mode(ctxt);
3309 if (r != X86EMUL_CONTINUE)
3310 return r;
3311 }
3312
bc00f8d2
TY
3313 return X86EMUL_CONTINUE;
3314}
3315
3316static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3317{
3318 unsigned long val;
3319
3320 if (ctxt->mode == X86EMUL_MODE_PROT64)
3321 val = ctxt->src.val & ~0ULL;
3322 else
3323 val = ctxt->src.val & ~0U;
3324
3325 /* #UD condition is already handled. */
3326 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3327 return emulate_gp(ctxt, 0);
3328
3329 /* Disable writeback. */
3330 ctxt->dst.type = OP_NONE;
3331 return X86EMUL_CONTINUE;
3332}
3333
e1e210b0
TY
3334static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3335{
1ae09954 3336 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
e1e210b0 3337 u64 msr_data;
1ae09954 3338 int r;
e1e210b0 3339
dd856efa
AK
3340 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3341 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
ac8d6cad 3342 r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
1ae09954 3343
36d546d5 3344 if (r == X86EMUL_PROPAGATE_FAULT)
e1e210b0
TY
3345 return emulate_gp(ctxt, 0);
3346
36d546d5 3347 return r;
e1e210b0
TY
3348}
3349
3350static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3351{
1ae09954 3352 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
e1e210b0 3353 u64 msr_data;
1ae09954
AG
3354 int r;
3355
ac8d6cad 3356 r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
1ae09954 3357
36d546d5 3358 if (r == X86EMUL_PROPAGATE_FAULT)
e1e210b0
TY
3359 return emulate_gp(ctxt, 0);
3360
36d546d5
HW
3361 if (r == X86EMUL_CONTINUE) {
3362 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3363 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3364 }
3365 return r;
e1e210b0
TY
3366}
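/*
 * Editor's illustration (not part of emulate.c): RDMSR and WRMSR move a
 * 64-bit MSR value through the EDX:EAX pair; hypothetical helpers
 * mirroring the splits performed above.
 */
static inline void msr_to_edx_eax(u64 msr_data, u32 *eax, u32 *edx)
{
	*eax = (u32)msr_data;
	*edx = (u32)(msr_data >> 32);
}

static inline u64 edx_eax_to_msr(u32 eax, u32 edx)
{
	return (u64)eax | ((u64)edx << 32);
}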
3367
dd307d01 3368static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
1bd5f469 3369{
dd307d01
PB
3370 if (segment > VCPU_SREG_GS &&
3371 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3372 ctxt->ops->cpl(ctxt) > 0)
3373 return emulate_gp(ctxt, 0);
1bd5f469 3374
dd307d01 3375 ctxt->dst.val = get_segment_selector(ctxt, segment);
b5bbf10e
NA
3376 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3377 ctxt->dst.bytes = 2;
1bd5f469
TY
3378 return X86EMUL_CONTINUE;
3379}
3380
dd307d01
PB
3381static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3382{
3383 if (ctxt->modrm_reg > VCPU_SREG_GS)
3384 return emulate_ud(ctxt);
3385
3386 return em_store_sreg(ctxt, ctxt->modrm_reg);
3387}
3388
1bd5f469
TY
3389static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3390{
9dac77fa 3391 u16 sel = ctxt->src.val;
1bd5f469 3392
9dac77fa 3393 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
1bd5f469
TY
3394 return emulate_ud(ctxt);
3395
9dac77fa 3396 if (ctxt->modrm_reg == VCPU_SREG_SS)
1bd5f469
TY
3397 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3398
3399 /* Disable writeback. */
3400 ctxt->dst.type = OP_NONE;
3401 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3402}
3403
3404static int em_sldt(struct x86_emulate_ctxt *ctxt)
3405{
3406 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3407}
3408
3409static int em_lldt(struct x86_emulate_ctxt *ctxt)
3410{
3411 u16 sel = ctxt->src.val;
3412
3413 /* Disable writeback. */
3414 ctxt->dst.type = OP_NONE;
3415 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3416}
3417
3418static int em_str(struct x86_emulate_ctxt *ctxt)
3419{
3420 return em_store_sreg(ctxt, VCPU_SREG_TR);
3421}
3422
3423static int em_ltr(struct x86_emulate_ctxt *ctxt)
3424{
3425 u16 sel = ctxt->src.val;
3426
3427 /* Disable writeback. */
3428 ctxt->dst.type = OP_NONE;
3429 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3430}
3431
3432static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3433{
3434 int rc;
3435 ulong linear;
3436
9dac77fa 3437 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
9fa088f4 3438 if (rc == X86EMUL_CONTINUE)
3cb16fe7 3439 ctxt->ops->invlpg(ctxt, linear);
38503911 3440 /* Disable writeback. */
9dac77fa 3441 ctxt->dst.type = OP_NONE;
3442 return X86EMUL_CONTINUE;
3443}
3444
3445static int em_clts(struct x86_emulate_ctxt *ctxt)
3446{
3447 ulong cr0;
3448
3449 cr0 = ctxt->ops->get_cr(ctxt, 0);
3450 cr0 &= ~X86_CR0_TS;
3451 ctxt->ops->set_cr(ctxt, 0, cr0);
3452 return X86EMUL_CONTINUE;
3453}
3454
b34a8051 3455static int em_hypercall(struct x86_emulate_ctxt *ctxt)
26d05cc7 3456{
0f54a321 3457 int rc = ctxt->ops->fix_hypercall(ctxt);
26d05cc7 3458
3459 if (rc != X86EMUL_CONTINUE)
3460 return rc;
3461
3462 /* Let the processor re-execute the fixed hypercall */
9dac77fa 3463 ctxt->_eip = ctxt->eip;
26d05cc7 3464 /* Disable writeback. */
9dac77fa 3465 ctxt->dst.type = OP_NONE;
3466 return X86EMUL_CONTINUE;
3467}
3468
3469static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3470 void (*get)(struct x86_emulate_ctxt *ctxt,
3471 struct desc_ptr *ptr))
3472{
3473 struct desc_ptr desc_ptr;
3474
3475 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3476 ctxt->ops->cpl(ctxt) > 0)
3477 return emulate_gp(ctxt, 0);
3478
3479 if (ctxt->mode == X86EMUL_MODE_PROT64)
3480 ctxt->op_bytes = 8;
3481 get(ctxt, &desc_ptr);
3482 if (ctxt->op_bytes == 2) {
3483 ctxt->op_bytes = 4;
3484 desc_ptr.address &= 0x00ffffff;
3485 }
3486 /* Disable writeback. */
3487 ctxt->dst.type = OP_NONE;
3488 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3489 &desc_ptr, 2 + ctxt->op_bytes);
3490}
3491
3492static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3493{
3494 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3495}
3496
3497static int em_sidt(struct x86_emulate_ctxt *ctxt)
3498{
3499 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3500}
3501
5b7f6a1e 3502static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
26d05cc7 3503{
3504 struct desc_ptr desc_ptr;
3505 int rc;
3506
3507 if (ctxt->mode == X86EMUL_MODE_PROT64)
3508 ctxt->op_bytes = 8;
9dac77fa 3509 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
26d05cc7 3510 &desc_ptr.size, &desc_ptr.address,
9dac77fa 3511 ctxt->op_bytes);
3512 if (rc != X86EMUL_CONTINUE)
3513 return rc;
9a9abf6b 3514 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
fd8cb433 3515 emul_is_noncanonical_address(desc_ptr.address, ctxt))
9a9abf6b 3516 return emulate_gp(ctxt, 0);
3517 if (lgdt)
3518 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3519 else
3520 ctxt->ops->set_idt(ctxt, &desc_ptr);
26d05cc7 3521 /* Disable writeback. */
9dac77fa 3522 ctxt->dst.type = OP_NONE;
3523 return X86EMUL_CONTINUE;
3524}
3525
3526static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3527{
3528 return em_lgdt_lidt(ctxt, true);
3529}
3530
3531static int em_lidt(struct x86_emulate_ctxt *ctxt)
3532{
5b7f6a1e 3533 return em_lgdt_lidt(ctxt, false);
3534}
3535
3536static int em_smsw(struct x86_emulate_ctxt *ctxt)
3537{
3538 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3539 ctxt->ops->cpl(ctxt) > 0)
3540 return emulate_gp(ctxt, 0);
3541
3542 if (ctxt->dst.type == OP_MEM)
3543 ctxt->dst.bytes = 2;
9dac77fa 3544 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3545 return X86EMUL_CONTINUE;
3546}
3547
3548static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3549{
26d05cc7 3550 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3551 | (ctxt->src.val & 0x0f));
3552 ctxt->dst.type = OP_NONE;
3553 return X86EMUL_CONTINUE;
3554}
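/*
 * The ~0x0eul mask means LMSW only replaces MP/EM/TS (CR0 bits 1-3) and
 * can set PE (bit 0) but never clear it, matching the architected
 * behaviour: e.g. lmsw with a source of 0 leaves an already-set PE intact.
 */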
3555
3556static int em_loop(struct x86_emulate_ctxt *ctxt)
3557{
3558 int rc = X86EMUL_CONTINUE;
3559
01485a22 3560 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
dd856efa 3561 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
9dac77fa 3562 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
234f3ce4 3563 rc = jmp_rel(ctxt, ctxt->src.val);
d06e03ad 3564
234f3ce4 3565 return rc;
3566}
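/*
 * The 'ctxt->b ^ 0x5' trick maps the LOOPcc opcodes onto test_cc():
 * 0xe0 (loopne) ^ 0x5 gives condition 5 (ZF clear) and 0xe1 (loope) ^ 0x5
 * gives condition 4 (ZF set), while plain 0xe2 (loop) is accepted
 * unconditionally by the explicit opcode check.
 */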
3567
3568static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3569{
3570 int rc = X86EMUL_CONTINUE;
3571
dd856efa 3572 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
234f3ce4 3573 rc = jmp_rel(ctxt, ctxt->src.val);
d06e03ad 3574
234f3ce4 3575 return rc;
3576}
3577
3578static int em_in(struct x86_emulate_ctxt *ctxt)
3579{
3580 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3581 &ctxt->dst.val))
3582 return X86EMUL_IO_NEEDED;
3583
3584 return X86EMUL_CONTINUE;
3585}
3586
3587static int em_out(struct x86_emulate_ctxt *ctxt)
3588{
3589 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3590 &ctxt->src.val, 1);
3591 /* Disable writeback. */
3592 ctxt->dst.type = OP_NONE;
3593 return X86EMUL_CONTINUE;
3594}
3595
3596static int em_cli(struct x86_emulate_ctxt *ctxt)
3597{
3598 if (emulator_bad_iopl(ctxt))
3599 return emulate_gp(ctxt, 0);
3600
3601 ctxt->eflags &= ~X86_EFLAGS_IF;
3602 return X86EMUL_CONTINUE;
3603}
3604
3605static int em_sti(struct x86_emulate_ctxt *ctxt)
3606{
3607 if (emulator_bad_iopl(ctxt))
3608 return emulate_gp(ctxt, 0);
3609
3610 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3611 ctxt->eflags |= X86_EFLAGS_IF;
3612 return X86EMUL_CONTINUE;
3613}
3614
3615static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3616{
3617 u32 eax, ebx, ecx, edx;
3618 u64 msr = 0;
3619
3620 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3621 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3622 ctxt->ops->cpl(ctxt)) {
3623 return emulate_gp(ctxt, 0);
3624 }
6d6eede4 3625
3626 eax = reg_read(ctxt, VCPU_REGS_RAX);
3627 ecx = reg_read(ctxt, VCPU_REGS_RCX);
f91af517 3628 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3629 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3630 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3631 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3632 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3633 return X86EMUL_CONTINUE;
3634}
3635
3636static int em_sahf(struct x86_emulate_ctxt *ctxt)
3637{
3638 u32 flags;
3639
3640 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3641 X86_EFLAGS_SF;
3642 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3643
3644 ctxt->eflags &= ~0xffUL;
3645 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3646 return X86EMUL_CONTINUE;
3647}
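/*
 * Worked example: AH = 0xd5 (SF|ZF|AF|PF|CF) plus the always-set fixed
 * bit leaves the low byte of EFLAGS as 0xd7 after SAHF.
 */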
3648
3649static int em_lahf(struct x86_emulate_ctxt *ctxt)
3650{
3651 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3652 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3653 return X86EMUL_CONTINUE;
3654}
3655
3656static int em_bswap(struct x86_emulate_ctxt *ctxt)
3657{
3658 switch (ctxt->op_bytes) {
3659#ifdef CONFIG_X86_64
3660 case 8:
3661 asm("bswap %0" : "+r"(ctxt->dst.val));
3662 break;
3663#endif
3664 default:
3665 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3666 break;
3667 }
3668 return X86EMUL_CONTINUE;
3669}
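/*
 * A 16-bit BSWAP is architecturally undefined, so op_bytes == 2 simply
 * falls through to the 32-bit case; e.g. bswap %eax turns 0x12345678
 * into 0x78563412.
 */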
3670
3671static int em_clflush(struct x86_emulate_ctxt *ctxt)
3672{
3673 /* emulating clflush regardless of cpuid */
3674 return X86EMUL_CONTINUE;
3675}
3676
3677static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3678{
3679 /* emulating clflushopt regardless of cpuid */
3680 return X86EMUL_CONTINUE;
3681}
3682
3683static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3684{
3685 ctxt->dst.val = (s32) ctxt->src.val;
3686 return X86EMUL_CONTINUE;
3687}
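/*
 * MOVSXD sign-extends the 32-bit source: e.g. a source of 0x80000000
 * yields 0xffffffff80000000 in a 64-bit destination.
 */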
3688
3689static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3690{
5ae78e95 3691 if (!ctxt->ops->guest_has_fxsr(ctxt))
3692 return emulate_ud(ctxt);
3693
3694 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3695 return emulate_nm(ctxt);
3696
3697 /*
3698 * Don't emulate a case that should never be hit, instead of working
3699 * around a lack of fxsave64/fxrstor64 on old compilers.
3700 */
3701 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3702 return X86EMUL_UNHANDLEABLE;
3703
3704 return X86EMUL_CONTINUE;
3705}
3706
3707/*
3708 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3709 * and restore MXCSR.
3710 */
3711static size_t __fxstate_size(int nregs)
3712{
3713 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3714}
3715
3716static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3717{
3718 bool cr4_osfxsr;
3719 if (ctxt->mode == X86EMUL_MODE_PROT64)
3720 return __fxstate_size(16);
3721
3722 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3723 return __fxstate_size(cr4_osfxsr ? 8 : 0);
3724}
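/*
 * Concretely, with xmm_space starting at byte 160 of struct fxregs_state:
 * __fxstate_size(0) = 160, __fxstate_size(8) = 288 and
 * __fxstate_size(16) = 416 bytes.
 */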
3725
3726/*
3727 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3728 * 1) 16 bit mode
3729 * 2) 32 bit mode
3730 * - like (1), but FIP and FDP (the FPU instruction and data pointers) are only 16 bit. At least Intel CPUs
3731 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
3732 * save and restore
3733 * 3) 64-bit mode without REX.W prefix
3734 * - like (2), but XMM 8-15 are being saved and restored
3735 * 4) 64-bit mode with REX.W prefix (fxsave64/fxrstor64)
3736 * - like (3), but FIP and FDP are 64 bit
3737 *
3738 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3739 * desired result. (4) is not emulated.
3740 *
3741 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3742 * and FPU DS) should match.
3743 */
3744static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3745{
3746 struct fxregs_state fx_state;
3747 int rc;
3748
3749 rc = check_fxsr(ctxt);
3750 if (rc != X86EMUL_CONTINUE)
3751 return rc;
3752
43e51464 3753 kvm_fpu_get();
a7baead7 3754
3755 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3756
43e51464 3757 kvm_fpu_put();
a7baead7 3758
3759 if (rc != X86EMUL_CONTINUE)
3760 return rc;
3761
3762 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
3763 fxstate_size(ctxt));
3764}
3765
3766/*
3767 * FXRSTOR might restore XMM registers not provided by the guest. Fill
3768 * in the host registers (via FXSAVE) instead, so they won't be modified.
3769 * (preemption has to stay disabled until FXRSTOR).
3770 *
3771 * Use noinline to keep the stack for other functions called by callers small.
3772 */
3773static noinline int fxregs_fixup(struct fxregs_state *fx_state,
3774 const size_t used_size)
3775{
3776 struct fxregs_state fx_tmp;
3777 int rc;
3778
3779 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
3780 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
3781 __fxstate_size(16) - used_size);
3782
3783 return rc;
3784}
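/*
 * Example: for a 32-bit guest with CR4.OSFXSR=0, used_size is 160, so
 * bytes 160..415 (XMM0-15) are refilled from a fresh FXSAVE of the live
 * registers and the subsequent FXRSTOR restores them unchanged.
 */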
3785
3786static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
3787{
3788 struct fxregs_state fx_state;
3789 int rc;
9d643f63 3790 size_t size;
3791
3792 rc = check_fxsr(ctxt);
3793 if (rc != X86EMUL_CONTINUE)
3794 return rc;
3795
3796 size = fxstate_size(ctxt);
3797 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3798 if (rc != X86EMUL_CONTINUE)
3799 return rc;
3800
43e51464 3801 kvm_fpu_get();
a7baead7 3802
9d643f63 3803 if (size < __fxstate_size(16)) {
4d772cb8 3804 rc = fxregs_fixup(&fx_state, size);
3805 if (rc != X86EMUL_CONTINUE)
3806 goto out;
3807 }
283c95d0 3808
3809 if (fx_state.mxcsr >> 16) {
3810 rc = emulate_gp(ctxt, 0);
3811 goto out;
3812 }
3813
3814 if (rc == X86EMUL_CONTINUE)
3815 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
3816
9d643f63 3817out:
43e51464 3818 kvm_fpu_put();
a7baead7 3819
3820 return rc;
3821}
3822
3823static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
3824{
3825 u32 eax, ecx, edx;
3826
3827 if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
3828 return emulate_ud(ctxt);
3829
3830 eax = reg_read(ctxt, VCPU_REGS_RAX);
3831 edx = reg_read(ctxt, VCPU_REGS_RDX);
3832 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3833
3834 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
3835 return emulate_gp(ctxt, 0);
3836
3837 return X86EMUL_CONTINUE;
3838}
3839
3840static bool valid_cr(int nr)
3841{
3842 switch (nr) {
3843 case 0:
3844 case 2 ... 4:
3845 case 8:
3846 return true;
3847 default:
3848 return false;
3849 }
3850}
3851
d0fe7b64 3852static int check_cr_access(struct x86_emulate_ctxt *ctxt)
cfec82cb 3853{
9dac77fa 3854 if (!valid_cr(ctxt->modrm_reg))
3855 return emulate_ud(ctxt);
3856
3857 return X86EMUL_CONTINUE;
3858}
3859
3860static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3861{
3862 unsigned long dr7;
3863
717746e3 3864 ctxt->ops->get_dr(ctxt, 7, &dr7);
3b88e41a 3865
0701ec90 3866 return dr7 & DR7_GD;
3867}
3868
3869static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3870{
9dac77fa 3871 int dr = ctxt->modrm_reg;
3872 u64 cr4;
3873
3874 if (dr > 7)
3875 return emulate_ud(ctxt);
3876
717746e3 3877 cr4 = ctxt->ops->get_cr(ctxt, 4);
3878 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3879 return emulate_ud(ctxt);
3880
3881 if (check_dr7_gd(ctxt)) {
3882 ulong dr6;
3883
3884 ctxt->ops->get_dr(ctxt, 6, &dr6);
1fc5d194 3885 dr6 &= ~DR_TRAP_BITS;
9a3ecd5e 3886 dr6 |= DR6_BD | DR6_ACTIVE_LOW;
6d2a0526 3887 ctxt->ops->set_dr(ctxt, 6, dr6);
3b88e41a 3888 return emulate_db(ctxt);
6d2a0526 3889 }
3890
3891 return X86EMUL_CONTINUE;
3892}
3893
3894static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3895{
3896 u64 new_val = ctxt->src.val64;
3897 int dr = ctxt->modrm_reg;
3898
3899 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3900 return emulate_gp(ctxt, 0);
3901
3902 return check_dr_read(ctxt);
3903}
3904
3905static int check_svme(struct x86_emulate_ctxt *ctxt)
3906{
92ceb767 3907 u64 efer = 0;
01de8b09 3908
717746e3 3909 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3910
3911 if (!(efer & EFER_SVME))
3912 return emulate_ud(ctxt);
3913
3914 return X86EMUL_CONTINUE;
3915}
3916
3917static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3918{
dd856efa 3919 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3920
3921 /* Valid physical address? */
d4224449 3922 if (rax & 0xffff000000000000ULL)
3923 return emulate_gp(ctxt, 0);
3924
3925 return check_svme(ctxt);
3926}
3927
3928static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3929{
717746e3 3930 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
d7eb8203 3931
717746e3 3932 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
e9337c84 3933 return emulate_gp(ctxt, 0);
3934
3935 return X86EMUL_CONTINUE;
3936}
3937
3938static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3939{
717746e3 3940 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
dd856efa 3941 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
8061252e 3942
3943 /*
3944 * VMware allows access to these Pseduo-PMCs even when read via RDPMC
3945 * in Ring3 when CR4.PCE=0.
3946 */
3947 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
3948 return X86EMUL_CONTINUE;
3949
3950 /*
3951 * If CR4.PCE is clear, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE
3952 * check however is unnecessary because CPL is always 0 outside
3953 * protected mode.
3954 */
717746e3 3955 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
67f4d428 3956 ctxt->ops->check_pmc(ctxt, rcx))
3957 return emulate_gp(ctxt, 0);
3958
3959 return X86EMUL_CONTINUE;
3960}
3961
3962static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3963{
3964 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3965 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3966 return emulate_gp(ctxt, 0);
3967
3968 return X86EMUL_CONTINUE;
3969}
3970
3971static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3972{
3973 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3974 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3975 return emulate_gp(ctxt, 0);
3976
3977 return X86EMUL_CONTINUE;
3978}
3979
73fba5f4 3980#define D(_y) { .flags = (_y) }
3981#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3982#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3983 .intercept = x86_intercept_##_i, .check_perm = (_p) }
0b789eee 3984#define N D(NotImpl)
01de8b09 3985#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3986#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3987#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
39f062ff 3988#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
2276b511 3989#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
045a282c 3990#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
73fba5f4 3991#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
e28bbd44 3992#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
c4f035c6 3993#define II(_f, _e, _i) \
d40a6898 3994 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
d09beabd 3995#define IIP(_f, _e, _i, _p) \
3996 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3997 .intercept = x86_intercept_##_i, .check_perm = (_p) }
aa97bb48 3998#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
73fba5f4 3999
8d8f4e9f 4000#define D2bv(_f) D((_f) | ByteOp), D(_f)
f6511935 4001#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
8d8f4e9f 4002#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
f7857f35 4003#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4004#define I2bvIP(_f, _e, _i, _p) \
4005 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
8d8f4e9f 4006
4007#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4008 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4009 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
6230f7fc 4010
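/*
 * For illustration: F6ALU(Lock, em_add) expands to the six classic ALU
 * encodings (r/m8,r8; r/m,r; r8,r/m8; r,r/m; AL,imm8; eAX,imm), with
 * Lock dropped from the register- and accumulator-destination forms,
 * which cannot take a LOCK prefix.
 */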
4011static const struct opcode group7_rm0[] = {
4012 N,
b34a8051 4013 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4014 N, N, N, N, N, N,
4015};
4016
fd0a0d82 4017static const struct opcode group7_rm1[] = {
4018 DI(SrcNone | Priv, monitor),
4019 DI(SrcNone | Priv, mwait),
4020 N, N, N, N, N, N,
4021};
4022
4023static const struct opcode group7_rm2[] = {
4024 N,
4025 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4026 N, N, N, N, N, N,
4027};
4028
fd0a0d82 4029static const struct opcode group7_rm3[] = {
1c2545be 4030 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
b34a8051 4031 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4032 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4033 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4034 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4035 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4036 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4037 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
01de8b09 4038};
6230f7fc 4039
fd0a0d82 4040static const struct opcode group7_rm7[] = {
d7eb8203 4041 N,
1c2545be 4042 DIP(SrcNone, rdtscp, check_rdtsc),
4043 N, N, N, N, N, N,
4044};
d67fc27a 4045
fd0a0d82 4046static const struct opcode group1[] = {
4047 F(Lock, em_add),
4048 F(Lock | PageTable, em_or),
4049 F(Lock, em_adc),
4050 F(Lock, em_sbb),
4051 F(Lock | PageTable, em_and),
4052 F(Lock, em_sub),
4053 F(Lock, em_xor),
4054 F(NoWrite, em_cmp),
4055};
4056
fd0a0d82 4057static const struct opcode group1A[] = {
0f89b207 4058 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4059};
4060
4061static const struct opcode group2[] = {
4062 F(DstMem | ModRM, em_rol),
4063 F(DstMem | ModRM, em_ror),
4064 F(DstMem | ModRM, em_rcl),
4065 F(DstMem | ModRM, em_rcr),
4066 F(DstMem | ModRM, em_shl),
4067 F(DstMem | ModRM, em_shr),
4068 F(DstMem | ModRM, em_shl), /* /6 aliases /4 (shl) */
4069 F(DstMem | ModRM, em_sar),
4070};
4071
fd0a0d82 4072static const struct opcode group3[] = {
4073 F(DstMem | SrcImm | NoWrite, em_test),
4074 F(DstMem | SrcImm | NoWrite, em_test),
4075 F(DstMem | SrcNone | Lock, em_not),
4076 F(DstMem | SrcNone | Lock, em_neg),
4077 F(DstXacc | Src2Mem, em_mul_ex),
4078 F(DstXacc | Src2Mem, em_imul_ex),
4079 F(DstXacc | Src2Mem, em_div_ex),
4080 F(DstXacc | Src2Mem, em_idiv_ex),
4081};
4082
fd0a0d82 4083static const struct opcode group4[] = {
4084 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4085 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4086 N, N, N, N, N, N,
4087};
4088
fd0a0d82 4089static const struct opcode group5[] = {
4090 F(DstMem | SrcNone | Lock, em_inc),
4091 F(DstMem | SrcNone | Lock, em_dec),
4092 I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
4093 I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4094 I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
4095 I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
0f89b207 4096 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4097};
4098
fd0a0d82 4099static const struct opcode group6[] = {
4100 II(Prot | DstMem, em_sldt, sldt),
4101 II(Prot | DstMem, em_str, str),
a14e579f 4102 II(Prot | Priv | SrcMem16, em_lldt, lldt),
80890006 4103 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4104 N, N, N, N,
4105};
4106
fd0a0d82 4107static const struct group_dual group7 = { {
4108 II(Mov | DstMem, em_sgdt, sgdt),
4109 II(Mov | DstMem, em_sidt, sidt),
4110 II(SrcMem | Priv, em_lgdt, lgdt),
4111 II(SrcMem | Priv, em_lidt, lidt),
4112 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4113 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4114 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
73fba5f4 4115}, {
0f54a321 4116 EXT(0, group7_rm0),
5ef39c71 4117 EXT(0, group7_rm1),
4118 EXT(0, group7_rm2),
4119 EXT(0, group7_rm3),
4120 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4121 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4122 EXT(0, group7_rm7),
4123} };
4124
fd0a0d82 4125static const struct opcode group8[] = {
73fba5f4 4126 N, N, N, N,
4127 F(DstMem | SrcImmByte | NoWrite, em_bt),
4128 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4129 F(DstMem | SrcImmByte | Lock, em_btr),
4130 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4131};
4132
4133/*
4134 * The "memory" destination is actually always a register, since we come
4135 * from the register case of group9.
4136 */
4137static const struct gprefix pfx_0f_c7_7 = {
2183de41 4138 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4139};
4140
4141
fd0a0d82 4142static const struct group_dual group9 = { {
1c2545be 4143 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
73fba5f4 4144}, {
4145 N, N, N, N, N, N, N,
4146 GP(0, &pfx_0f_c7_7),
4147} };
4148
fd0a0d82 4149static const struct opcode group11[] = {
1c2545be 4150 I(DstMem | SrcImm | Mov | PageTable, em_mov),
d5ae7ce8 4151 X7(D(Undefined)),
4152};
4153
13e457e0 4154static const struct gprefix pfx_0f_ae_7 = {
51b958e5 4155 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4156};
4157
4158static const struct group_dual group15 = { {
4159 I(ModRM | Aligned16, em_fxsave),
4160 I(ModRM | Aligned16, em_fxrstor),
4161 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4162}, {
4163 N, N, N, N, N, N, N, N,
4164} };
4165
fd0a0d82 4166static const struct gprefix pfx_0f_6f_0f_7f = {
e5971755 4167 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4168};
4169
4170static const struct instr_dual instr_dual_0f_2b = {
4171 I(0, em_mov), N
4172};
4173
d5b77069 4174static const struct gprefix pfx_0f_2b = {
39f062ff 4175 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4176};
4177
4178static const struct gprefix pfx_0f_10_0f_11 = {
4179 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4180};
4181
27ce8258 4182static const struct gprefix pfx_0f_28_0f_29 = {
6fec27d8 4183 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4184};
4185
4186static const struct gprefix pfx_0f_e7 = {
4187 N, I(Sse, em_mov), N, N,
4188};
4189
045a282c 4190static const struct escape escape_d9 = { {
16bebefe 4191 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4192}, {
4193 /* 0xC0 - 0xC7 */
4194 N, N, N, N, N, N, N, N,
4195 /* 0xC8 - 0xCF */
4196 N, N, N, N, N, N, N, N,
4197 /* 0xD0 - 0xD7 */
4198 N, N, N, N, N, N, N, N,
4199 /* 0xD8 - 0xDF */
4200 N, N, N, N, N, N, N, N,
4201 /* 0xE0 - 0xE7 */
4202 N, N, N, N, N, N, N, N,
4203 /* 0xE8 - 0xEF */
4204 N, N, N, N, N, N, N, N,
4205 /* 0xF0 - 0xF7 */
4206 N, N, N, N, N, N, N, N,
4207 /* 0xF8 - 0xFF */
4208 N, N, N, N, N, N, N, N,
4209} };
4210
4211static const struct escape escape_db = { {
4212 N, N, N, N, N, N, N, N,
4213}, {
4214 /* 0xC0 - 0xC7 */
4215 N, N, N, N, N, N, N, N,
4216 /* 0xC8 - 0xCF */
4217 N, N, N, N, N, N, N, N,
4218 /* 0xD0 - 0xD7 */
4219 N, N, N, N, N, N, N, N,
4220 /* 0xD8 - 0xDF */
4221 N, N, N, N, N, N, N, N,
4222 /* 0xE0 - 0xE7 */
4223 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4224 /* 0xE8 - 0xEF */
4225 N, N, N, N, N, N, N, N,
4226 /* 0xF0 - 0xF7 */
4227 N, N, N, N, N, N, N, N,
4228 /* 0xF8 - 0xFF */
4229 N, N, N, N, N, N, N, N,
4230} };
4231
4232static const struct escape escape_dd = { {
16bebefe 4233 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4234}, {
4235 /* 0xC0 - 0xC7 */
4236 N, N, N, N, N, N, N, N,
4237 /* 0xC8 - 0xCF */
4238 N, N, N, N, N, N, N, N,
4240 /* 0xD0 - 0xD7 */
4240 N, N, N, N, N, N, N, N,
4241 /* 0xD8 - 0xDF */
4242 N, N, N, N, N, N, N, N,
4243 /* 0xE0 - 0xE7 */
4244 N, N, N, N, N, N, N, N,
4245 /* 0xE8 - 0xEF */
4246 N, N, N, N, N, N, N, N,
4247 /* 0xF0 - 0xF7 */
4248 N, N, N, N, N, N, N, N,
4249 /* 0xF8 - 0xFF */
4250 N, N, N, N, N, N, N, N,
4251} };
4252
4253static const struct instr_dual instr_dual_0f_c3 = {
4254 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4255};
4256
4257static const struct mode_dual mode_dual_63 = {
4258 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4259};
4260
4261static const struct instr_dual instr_dual_8d = {
4262 D(DstReg | SrcMem | ModRM | NoAccess), N
4263};
4264
fd0a0d82 4265static const struct opcode opcode_table[256] = {
73fba5f4 4266 /* 0x00 - 0x07 */
fb864fbc 4267 F6ALU(Lock, em_add),
4268 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4269 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
73fba5f4 4270 /* 0x08 - 0x0F */
fb864fbc 4271 F6ALU(Lock | PageTable, em_or),
4272 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4273 N,
73fba5f4 4274 /* 0x10 - 0x17 */
fb864fbc 4275 F6ALU(Lock, em_adc),
4276 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4277 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
73fba5f4 4278 /* 0x18 - 0x1F */
fb864fbc 4279 F6ALU(Lock, em_sbb),
4280 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4281 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
73fba5f4 4282 /* 0x20 - 0x27 */
fb864fbc 4283 F6ALU(Lock | PageTable, em_and), N, N,
73fba5f4 4284 /* 0x28 - 0x2F */
fb864fbc 4285 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
73fba5f4 4286 /* 0x30 - 0x37 */
fb864fbc 4287 F6ALU(Lock, em_xor), N, N,
73fba5f4 4288 /* 0x38 - 0x3F */
fb864fbc 4289 F6ALU(NoWrite, em_cmp), N, N,
73fba5f4 4290 /* 0x40 - 0x4F */
95413dc4 4291 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
73fba5f4 4292 /* 0x50 - 0x57 */
63540382 4293 X8(I(SrcReg | Stack, em_push)),
73fba5f4 4294 /* 0x58 - 0x5F */
c54fe504 4295 X8(I(DstReg | Stack, em_pop)),
73fba5f4 4296 /* 0x60 - 0x67 */
4297 I(ImplicitOps | Stack | No64, em_pusha),
4298 I(ImplicitOps | Stack | No64, em_popa),
2276b511 4299 N, MD(ModRM, &mode_dual_63),
4300 N, N, N, N,
4301 /* 0x68 - 0x6F */
4302 I(SrcImm | Mov | Stack, em_push),
4303 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4304 I(SrcImmByte | Mov | Stack, em_push),
4305 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
b3356bf0 4306 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
2b5e97e1 4307 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
73fba5f4 4308 /* 0x70 - 0x7F */
018d70ff 4309 X16(D(SrcImmByte | NearBranch | IsBranch)),
73fba5f4 4310 /* 0x80 - 0x87 */
4311 G(ByteOp | DstMem | SrcImm, group1),
4312 G(DstMem | SrcImm, group1),
4313 G(ByteOp | DstMem | SrcImm | No64, group1),
4314 G(DstMem | SrcImmByte, group1),
fb864fbc 4315 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
d5ae7ce8 4316 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
73fba5f4 4317 /* 0x88 - 0x8F */
d5ae7ce8 4318 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
b9eac5f4 4319 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
d5ae7ce8 4320 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4ac5b423 4321 ID(0, &instr_dual_8d),
1bd5f469
TY
4322 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4323 G(0, group1A),
73fba5f4 4324 /* 0x90 - 0x97 */
bf608f88 4325 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
73fba5f4 4326 /* 0x98 - 0x9F */
61429142 4327 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
018d70ff 4328 I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
62aaa2f0 4329 II(ImplicitOps | Stack, em_pushf, pushf),
4330 II(ImplicitOps | Stack, em_popf, popf),
4331 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
73fba5f4 4332 /* 0xA0 - 0xA7 */
b9eac5f4 4333 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
d5ae7ce8 4334 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4335 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4336 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
73fba5f4 4337 /* 0xA8 - 0xAF */
fb864fbc 4338 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4339 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4340 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
5aca3722 4341 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
73fba5f4 4342 /* 0xB0 - 0xB7 */
b9eac5f4 4343 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
73fba5f4 4344 /* 0xB8 - 0xBF */
5e2c6883 4345 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
73fba5f4 4346 /* 0xC0 - 0xC7 */
007a3b54 4347 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4348 I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
4349 I(ImplicitOps | NearBranch | IsBranch, em_ret),
4350 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4351 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
a4d4a7c1 4352 G(ByteOp, group11), G(0, group11),
73fba5f4 4353 /* 0xC8 - 0xCF */
4354 I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
4355 I(Stack | IsBranch, em_leave),
4356 I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
4357 I(ImplicitOps | IsBranch, em_ret_far),
4358 D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4359 D(ImplicitOps | No64 | IsBranch),
4360 II(ImplicitOps | IsBranch, em_iret, iret),
73fba5f4 4361 /* 0xD0 - 0xD7 */
4362 G(Src2One | ByteOp, group2), G(Src2One, group2),
4363 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
a035d5c6 4364 I(DstAcc | SrcImmUByte | No64, em_aam),
4365 I(DstAcc | SrcImmUByte | No64, em_aad),
4366 F(DstAcc | ByteOp | No64, em_salc),
7fa57952 4367 I(DstAcc | SrcXLat | ByteOp, em_mov),
73fba5f4 4368 /* 0xD8 - 0xDF */
045a282c 4369 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
73fba5f4 4370 /* 0xE0 - 0xE7 */
4371 X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4372 I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4373 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4374 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
73fba5f4 4375 /* 0xE8 - 0xEF */
4376 I(SrcImm | NearBranch | IsBranch, em_call),
4377 D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4378 I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4379 D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4380 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4381 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
73fba5f4 4382 /* 0xF0 - 0xF7 */
bf608f88 4383 N, DI(ImplicitOps, icebp), N, N,
4384 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4385 G(ByteOp, group3), G(0, group3),
73fba5f4 4386 /* 0xF8 - 0xFF */
4387 D(ImplicitOps), D(ImplicitOps),
4388 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4389 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4390};
4391
fd0a0d82 4392static const struct opcode twobyte_table[256] = {
73fba5f4 4393 /* 0x00 - 0x0F */
dee6bb70 4394 G(0, group6), GD(0, &group7), N, N,
018d70ff 4395 N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
db5b0762 4396 II(ImplicitOps | Priv, em_clts, clts), N,
3c6e276f 4397 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3f6f1480 4398 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
73fba5f4 4399 /* 0x10 - 0x1F */
4400 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4401 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4402 N, N, N, N, N, N,
4403 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4404 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4405 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4406 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4407 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4408 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
73fba5f4 4409 /* 0x20 - 0x2F */
d0fe7b64 4410 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4411 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4412 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
d0fe7b64 4413 check_cr_access),
4414 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4415 check_dr_write),
73fba5f4 4416 N, N, N, N,
4417 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4418 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
d5b77069 4419 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
3e114eb4 4420 N, N, N, N,
73fba5f4 4421 /* 0x30 - 0x3F */
e1e210b0 4422 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
8061252e 4423 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
e1e210b0 4424 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
222d21aa 4425 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4426 I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4427 I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
d867162c 4428 N, N,
4429 N, N, N, N, N, N, N, N,
4430 /* 0x40 - 0x4F */
140bad89 4431 X16(D(DstReg | SrcMem | ModRM)),
4432 /* 0x50 - 0x5F */
4433 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4434 /* 0x60 - 0x6F */
4435 N, N, N, N,
4436 N, N, N, N,
4437 N, N, N, N,
4438 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
73fba5f4 4439 /* 0x70 - 0x7F */
aa97bb48
AK
4440 N, N, N, N,
4441 N, N, N, N,
4442 N, N, N, N,
4443 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
73fba5f4 4444 /* 0x80 - 0x8F */
018d70ff 4445 X16(D(SrcImm | NearBranch | IsBranch)),
73fba5f4 4446 /* 0x90 - 0x9F */
ee45b58e 4447 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
73fba5f4 4448 /* 0xA0 - 0xA7 */
1cd196ea 4449 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4450 II(ImplicitOps, em_cpuid, cpuid),
4451 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4452 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4453 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
73fba5f4 4454 /* 0xA8 - 0xAF */
1cd196ea 4455 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
89651a3d 4456 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
11c363ba 4457 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4458 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4459 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
13e457e0 4460 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
73fba5f4 4461 /* 0xB0 - 0xB7 */
2fcf5c8a 4462 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
d4b4325f 4463 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
11c363ba 4464 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4465 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4466 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
2adb5ad9 4467 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4468 /* 0xB8 - 0xBF */
4469 N, N,
ce7faab2 4470 G(BitOp, group8),
11c363ba 4471 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4472 I(DstReg | SrcMem | ModRM, em_bsf_c),
4473 I(DstReg | SrcMem | ModRM, em_bsr_c),
2adb5ad9 4474 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
9299836e 4475 /* 0xC0 - 0xC7 */
e47a5f5f 4476 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
39f062ff 4477 N, ID(0, &instr_dual_0f_c3),
73fba5f4 4478 N, N, N, GD(0, &group9),
4479 /* 0xC8 - 0xCF */
4480 X8(I(DstReg, em_bswap)),
4481 /* 0xD0 - 0xDF */
4482 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4483 /* 0xE0 - 0xEF */
4484 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4485 N, N, N, N, N, N, N, N,
4486 /* 0xF0 - 0xFF */
4487 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4488};
4489
4490static const struct instr_dual instr_dual_0f_38_f0 = {
4491 I(DstReg | SrcMem | Mov, em_movbe), N
4492};
4493
4494static const struct instr_dual instr_dual_0f_38_f1 = {
4495 I(DstMem | SrcReg | Mov, em_movbe), N
4496};
4497
0bc5eedb 4498static const struct gprefix three_byte_0f_38_f0 = {
39f062ff 4499 ID(0, &instr_dual_0f_38_f0), N, N, N
4500};
4501
4502static const struct gprefix three_byte_0f_38_f1 = {
39f062ff 4503 ID(0, &instr_dual_0f_38_f1), N, N, N
4504};
4505
4506/*
4507 * Insns below are indexed by the third opcode byte; a mandatory prefix
4508 * then selects among the variants.
4509 */
4510static const struct opcode opcode_map_0f_38[256] = {
4511 /* 0x00 - 0x7f */
4512 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4513 /* 0x80 - 0xef */
4514 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4515 /* 0xf0 - 0xf1 */
4516 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4517 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4518 /* 0xf2 - 0xff */
4519 N, N, X4(N), X8(N)
4520};
4521
4522#undef D
4523#undef N
4524#undef G
4525#undef GD
4526#undef I
aa97bb48 4527#undef GP
01de8b09 4528#undef EXT
2276b511 4529#undef MD
2b42fce6 4530#undef ID
73fba5f4 4531
8d8f4e9f 4532#undef D2bv
f6511935 4533#undef D2bvIP
8d8f4e9f 4534#undef I2bv
d7841a4b 4535#undef I2bvIP
d67fc27a 4536#undef F6ALU
8d8f4e9f 4537
9dac77fa 4538static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4539{
4540 unsigned size;
4541
9dac77fa 4542 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4543 if (size == 8)
4544 size = 4;
4545 return size;
4546}
4547
4548static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4549 unsigned size, bool sign_extension)
4550{
4551 int rc = X86EMUL_CONTINUE;
4552
4553 op->type = OP_IMM;
4554 op->bytes = size;
9dac77fa 4555 op->addr.mem.ea = ctxt->_eip;
4556 /* NB. Immediates are sign-extended as necessary. */
4557 switch (op->bytes) {
4558 case 1:
e85a1085 4559 op->val = insn_fetch(s8, ctxt);
4560 break;
4561 case 2:
e85a1085 4562 op->val = insn_fetch(s16, ctxt);
39f21ee5
AK
4563 break;
4564 case 4:
e85a1085 4565 op->val = insn_fetch(s32, ctxt);
39f21ee5 4566 break;
4567 case 8:
4568 op->val = insn_fetch(s64, ctxt);
4569 break;
4570 }
4571 if (!sign_extension) {
4572 switch (op->bytes) {
4573 case 1:
4574 op->val &= 0xff;
4575 break;
4576 case 2:
4577 op->val &= 0xffff;
4578 break;
4579 case 4:
4580 op->val &= 0xffffffff;
4581 break;
4582 }
4583 }
4584done:
4585 return rc;
4586}
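/*
 * E.g. decode_imm(ctxt, op, 1, true) turns an immediate byte of 0xff
 * into op->val = -1, while sign_extension == false masks the result
 * back down to 0xff.
 */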
4587
4588static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4589 unsigned d)
4590{
4591 int rc = X86EMUL_CONTINUE;
4592
4593 switch (d) {
4594 case OpReg:
2adb5ad9 4595 decode_register_operand(ctxt, op);
4596 break;
4597 case OpImmUByte:
608aabe3 4598 rc = decode_imm(ctxt, op, 1, false);
4599 break;
4600 case OpMem:
41ddf978 4601 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4602 mem_common:
4603 *op = ctxt->memop;
4604 ctxt->memopp = op;
96888977 4605 if (ctxt->d & BitOp)
a9945549
AK
4606 fetch_bit_operand(ctxt);
4607 op->orig_val = op->val;
4608 break;
41ddf978 4609 case OpMem64:
aaa05f24 4610 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
41ddf978 4611 goto mem_common;
4612 case OpAcc:
4613 op->type = OP_REG;
4614 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
dd856efa 4615 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4616 fetch_register_operand(op);
4617 op->orig_val = op->val;
4618 break;
4619 case OpAccLo:
4620 op->type = OP_REG;
4621 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4622 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4623 fetch_register_operand(op);
4624 op->orig_val = op->val;
4625 break;
4626 case OpAccHi:
4627 if (ctxt->d & ByteOp) {
4628 op->type = OP_NONE;
4629 break;
4630 }
4631 op->type = OP_REG;
4632 op->bytes = ctxt->op_bytes;
4633 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4634 fetch_register_operand(op);
4635 op->orig_val = op->val;
4636 break;
4637 case OpDI:
4638 op->type = OP_MEM;
4639 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4640 op->addr.mem.ea =
01485a22 4641 register_address(ctxt, VCPU_REGS_RDI);
4642 op->addr.mem.seg = VCPU_SREG_ES;
4643 op->val = 0;
b3356bf0 4644 op->count = 1;
4645 break;
4646 case OpDX:
4647 op->type = OP_REG;
4648 op->bytes = 2;
dd856efa 4649 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4650 fetch_register_operand(op);
4651 break;
4dd6a57d 4652 case OpCL:
d29b9d7e 4653 op->type = OP_IMM;
4dd6a57d 4654 op->bytes = 1;
dd856efa 4655 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4656 break;
4657 case OpImmByte:
4658 rc = decode_imm(ctxt, op, 1, true);
4659 break;
4660 case OpOne:
d29b9d7e 4661 op->type = OP_IMM;
4662 op->bytes = 1;
4663 op->val = 1;
4664 break;
4665 case OpImm:
4666 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4667 break;
4668 case OpImm64:
4669 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4670 break;
4671 case OpMem8:
4672 ctxt->memop.bytes = 1;
660696d1 4673 if (ctxt->memop.type == OP_REG) {
4674 ctxt->memop.addr.reg = decode_register(ctxt,
4675 ctxt->modrm_rm, true);
4676 fetch_register_operand(&ctxt->memop);
4677 }
28867cee 4678 goto mem_common;
4679 case OpMem16:
4680 ctxt->memop.bytes = 2;
4681 goto mem_common;
4682 case OpMem32:
4683 ctxt->memop.bytes = 4;
4684 goto mem_common;
4685 case OpImmU16:
4686 rc = decode_imm(ctxt, op, 2, false);
4687 break;
4688 case OpImmU:
4689 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4690 break;
4691 case OpSI:
4692 op->type = OP_MEM;
4693 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4694 op->addr.mem.ea =
01485a22 4695 register_address(ctxt, VCPU_REGS_RSI);
573e80fe 4696 op->addr.mem.seg = ctxt->seg_override;
0fe59128 4697 op->val = 0;
b3356bf0 4698 op->count = 1;
0fe59128 4699 break;
4700 case OpXLat:
4701 op->type = OP_MEM;
4702 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4703 op->addr.mem.ea =
01485a22 4704 address_mask(ctxt,
4705 reg_read(ctxt, VCPU_REGS_RBX) +
4706 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
573e80fe 4707 op->addr.mem.seg = ctxt->seg_override;
4708 op->val = 0;
4709 break;
4710 case OpImmFAddr:
4711 op->type = OP_IMM;
4712 op->addr.mem.ea = ctxt->_eip;
4713 op->bytes = ctxt->op_bytes + 2;
4714 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4715 break;
4716 case OpMemFAddr:
4717 ctxt->memop.bytes = ctxt->op_bytes + 2;
4718 goto mem_common;
c191a7a0 4719 case OpES:
d29b9d7e 4720 op->type = OP_IMM;
4721 op->val = VCPU_SREG_ES;
4722 break;
4723 case OpCS:
d29b9d7e 4724 op->type = OP_IMM;
4725 op->val = VCPU_SREG_CS;
4726 break;
4727 case OpSS:
d29b9d7e 4728 op->type = OP_IMM;
4729 op->val = VCPU_SREG_SS;
4730 break;
4731 case OpDS:
d29b9d7e 4732 op->type = OP_IMM;
4733 op->val = VCPU_SREG_DS;
4734 break;
4735 case OpFS:
d29b9d7e 4736 op->type = OP_IMM;
4737 op->val = VCPU_SREG_FS;
4738 break;
4739 case OpGS:
d29b9d7e 4740 op->type = OP_IMM;
4741 op->val = VCPU_SREG_GS;
4742 break;
4743 case OpImplicit:
4744 /* Special instructions do their own operand decoding. */
4745 default:
4746 op->type = OP_NONE; /* Disable writeback. */
4747 break;
4748 }
4749
4750done:
4751 return rc;
4752}
4753
b35491e6 4754int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
dde7e6d1 4755{
4756 int rc = X86EMUL_CONTINUE;
4757 int mode = ctxt->mode;
46561646 4758 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
0d7cdee8 4759 bool op_prefix = false;
573e80fe 4760 bool has_seg_override = false;
46561646 4761 struct opcode opcode;
4762 u16 dummy;
4763 struct desc_struct desc;
dde7e6d1 4764
4765 ctxt->memop.type = OP_NONE;
4766 ctxt->memopp = NULL;
9dac77fa 4767 ctxt->_eip = ctxt->eip;
4768 ctxt->fetch.ptr = ctxt->fetch.data;
4769 ctxt->fetch.end = ctxt->fetch.data + insn_len;
1ce19dc1 4770 ctxt->opcode_len = 1;
342993f9 4771 ctxt->intercept = x86_intercept_none;
dc25e89e 4772 if (insn_len > 0)
9dac77fa 4773 memcpy(ctxt->fetch.data, insn, insn_len);
285ca9e9 4774 else {
9506d57d 4775 rc = __do_insn_fetch_bytes(ctxt, 1);
285ca9e9 4776 if (rc != X86EMUL_CONTINUE)
016cd759 4777 goto done;
285ca9e9 4778 }
4779
4780 switch (mode) {
4781 case X86EMUL_MODE_REAL:
4782 case X86EMUL_MODE_VM86:
4783 def_op_bytes = def_ad_bytes = 2;
4784 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4785 if (desc.d)
4786 def_op_bytes = def_ad_bytes = 4;
4787 break;
4788 case X86EMUL_MODE_PROT16:
4789 def_op_bytes = def_ad_bytes = 2;
4790 break;
4791 case X86EMUL_MODE_PROT32:
4792 def_op_bytes = def_ad_bytes = 4;
4793 break;
4794#ifdef CONFIG_X86_64
4795 case X86EMUL_MODE_PROT64:
4796 def_op_bytes = 4;
4797 def_ad_bytes = 8;
4798 break;
4799#endif
4800 default:
1d2887e2 4801 return EMULATION_FAILED;
4802 }
4803
4804 ctxt->op_bytes = def_op_bytes;
4805 ctxt->ad_bytes = def_ad_bytes;
4806
4807 /* Legacy prefixes. */
4808 for (;;) {
e85a1085 4809 switch (ctxt->b = insn_fetch(u8, ctxt)) {
dde7e6d1 4810 case 0x66: /* operand-size override */
0d7cdee8 4811 op_prefix = true;
dde7e6d1 4812 /* switch between 2/4 bytes */
9dac77fa 4813 ctxt->op_bytes = def_op_bytes ^ 6;
4814 break;
4815 case 0x67: /* address-size override */
4816 if (mode == X86EMUL_MODE_PROT64)
4817 /* switch between 4/8 bytes */
9dac77fa 4818 ctxt->ad_bytes = def_ad_bytes ^ 12;
4819 else
4820 /* switch between 2/4 bytes */
9dac77fa 4821 ctxt->ad_bytes = def_ad_bytes ^ 6;
4822 break;
4823 case 0x26: /* ES override */
4824 has_seg_override = true;
4825 ctxt->seg_override = VCPU_SREG_ES;
4826 break;
dde7e6d1 4827 case 0x2e: /* CS override */
4828 has_seg_override = true;
4829 ctxt->seg_override = VCPU_SREG_CS;
4830 break;
dde7e6d1 4831 case 0x36: /* SS override */
4832 has_seg_override = true;
4833 ctxt->seg_override = VCPU_SREG_SS;
4834 break;
dde7e6d1 4835 case 0x3e: /* DS override */
573e80fe 4836 has_seg_override = true;
125ffc5e 4837 ctxt->seg_override = VCPU_SREG_DS;
4838 break;
4839 case 0x64: /* FS override */
4840 has_seg_override = true;
4841 ctxt->seg_override = VCPU_SREG_FS;
4842 break;
dde7e6d1 4843 case 0x65: /* GS override */
573e80fe 4844 has_seg_override = true;
125ffc5e 4845 ctxt->seg_override = VCPU_SREG_GS;
4846 break;
4847 case 0x40 ... 0x4f: /* REX */
4848 if (mode != X86EMUL_MODE_PROT64)
4849 goto done_prefixes;
9dac77fa 4850 ctxt->rex_prefix = ctxt->b;
4851 continue;
4852 case 0xf0: /* LOCK */
9dac77fa 4853 ctxt->lock_prefix = 1;
4854 break;
4855 case 0xf2: /* REPNE/REPNZ */
dde7e6d1 4856 case 0xf3: /* REP/REPE/REPZ */
9dac77fa 4857 ctxt->rep_prefix = ctxt->b;
4858 break;
4859 default:
4860 goto done_prefixes;
4861 }
4862
4863 /* Any legacy prefix after a REX prefix nullifies its effect. */
4864
9dac77fa 4865 ctxt->rex_prefix = 0;
4866 }
4867
4868done_prefixes:
4869
4870 /* REX prefix. */
4871 if (ctxt->rex_prefix & 8)
4872 ctxt->op_bytes = 8; /* REX.W */
4873
4874 /* Opcode byte(s). */
9dac77fa 4875 opcode = opcode_table[ctxt->b];
d3ad6243 4876 /* Two-byte opcode? */
9dac77fa 4877 if (ctxt->b == 0x0f) {
1ce19dc1 4878 ctxt->opcode_len = 2;
e85a1085 4879 ctxt->b = insn_fetch(u8, ctxt);
9dac77fa 4880 opcode = twobyte_table[ctxt->b];
4881
4882 /* 0F_38 opcode map */
4883 if (ctxt->b == 0x38) {
4884 ctxt->opcode_len = 3;
4885 ctxt->b = insn_fetch(u8, ctxt);
4886 opcode = opcode_map_0f_38[ctxt->b];
4887 }
dde7e6d1 4888 }
9dac77fa 4889 ctxt->d = opcode.flags;
dde7e6d1 4890
4891 if (ctxt->d & ModRM)
4892 ctxt->modrm = insn_fetch(u8, ctxt);
4893
4894 /* vex-prefix instructions are not implemented */
4895 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
d14cb5df 4896 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4897 ctxt->d = NotImpl;
4898 }
4899
4900 while (ctxt->d & GroupMask) {
4901 switch (ctxt->d & GroupMask) {
46561646 4902 case Group:
9dac77fa 4903 goffset = (ctxt->modrm >> 3) & 7;
4904 opcode = opcode.u.group[goffset];
4905 break;
4906 case GroupDual:
4907 goffset = (ctxt->modrm >> 3) & 7;
4908 if ((ctxt->modrm >> 6) == 3)
4909 opcode = opcode.u.gdual->mod3[goffset];
4910 else
4911 opcode = opcode.u.gdual->mod012[goffset];
4912 break;
4913 case RMExt:
9dac77fa 4914 goffset = ctxt->modrm & 7;
01de8b09 4915 opcode = opcode.u.group[goffset];
4916 break;
4917 case Prefix:
9dac77fa 4918 if (ctxt->rep_prefix && op_prefix)
1d2887e2 4919 return EMULATION_FAILED;
9dac77fa 4920 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4921 switch (simd_prefix) {
4922 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4923 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4924 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4925 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4926 }
4927 break;
045a282c 4928 case Escape:
4929 if (ctxt->modrm > 0xbf) {
4930 size_t size = ARRAY_SIZE(opcode.u.esc->high);
4931 u32 index = array_index_nospec(
4932 ctxt->modrm - 0xc0, size);
4933
4934 opcode = opcode.u.esc->high[index];
4935 } else {
045a282c 4936 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
3c9053a2 4937 }
045a282c 4938 break;
4939 case InstrDual:
4940 if ((ctxt->modrm >> 6) == 3)
4941 opcode = opcode.u.idual->mod3;
4942 else
4943 opcode = opcode.u.idual->mod012;
4944 break;
4945 case ModeDual:
4946 if (ctxt->mode == X86EMUL_MODE_PROT64)
4947 opcode = opcode.u.mdual->mode64;
4948 else
4949 opcode = opcode.u.mdual->mode32;
4950 break;
46561646 4951 default:
1d2887e2 4952 return EMULATION_FAILED;
0d7cdee8 4953 }
46561646 4954
b1ea50b2 4955 ctxt->d &= ~(u64)GroupMask;
9dac77fa 4956 ctxt->d |= opcode.flags;
4957 }
4958
4959 ctxt->is_branch = opcode.flags & IsBranch;
4960
4961 /* Unrecognised? */
4962 if (ctxt->d == 0)
4963 return EMULATION_FAILED;
4964
9dac77fa 4965 ctxt->execute = opcode.u.execute;
dde7e6d1 4966
4967 if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
4968 likely(!(ctxt->d & EmulateOnUD)))
4969 return EMULATION_FAILED;
4970
d40a6898 4971 if (unlikely(ctxt->d &
4972 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4973 No16))) {
4974 /*
4975 * These are copied unconditionally here, and checked unconditionally
4976 * in x86_emulate_insn.
4977 */
4978 ctxt->check_perm = opcode.check_perm;
4979 ctxt->intercept = opcode.intercept;
dde7e6d1 4980
4981 if (ctxt->d & NotImpl)
4982 return EMULATION_FAILED;
d867162c 4983
4984 if (mode == X86EMUL_MODE_PROT64) {
4985 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4986 ctxt->op_bytes = 8;
4987 else if (ctxt->d & NearBranch)
4988 ctxt->op_bytes = 8;
4989 }
7f9b4b75 4990
4991 if (ctxt->d & Op3264) {
4992 if (mode == X86EMUL_MODE_PROT64)
4993 ctxt->op_bytes = 8;
4994 else
4995 ctxt->op_bytes = 4;
4996 }
4997
4998 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4999 ctxt->op_bytes = 4;
5000
5001 if (ctxt->d & Sse)
5002 ctxt->op_bytes = 16;
5003 else if (ctxt->d & Mmx)
5004 ctxt->op_bytes = 8;
5005 }
1253791d 5006
dde7e6d1 5007 /* ModRM and SIB bytes. */
9dac77fa 5008 if (ctxt->d & ModRM) {
f09ed83e 5009 rc = decode_modrm(ctxt, &ctxt->memop);
5010 if (!has_seg_override) {
5011 has_seg_override = true;
5012 ctxt->seg_override = ctxt->modrm_seg;
5013 }
9dac77fa 5014 } else if (ctxt->d & MemAbs)
f09ed83e 5015 rc = decode_abs(ctxt, &ctxt->memop);
5016 if (rc != X86EMUL_CONTINUE)
5017 goto done;
5018
5019 if (!has_seg_override)
5020 ctxt->seg_override = VCPU_SREG_DS;
dde7e6d1 5021
573e80fe 5022 ctxt->memop.addr.mem.seg = ctxt->seg_override;
dde7e6d1 5023
5024 /*
5025 * Decode and fetch the source operand: register, memory
5026 * or immediate.
5027 */
0fe59128 5028 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5029 if (rc != X86EMUL_CONTINUE)
5030 goto done;
5031
5032 /*
5033 * Decode and fetch the second source operand: register, memory
5034 * or immediate.
5035 */
4dd6a57d 5036 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5037 if (rc != X86EMUL_CONTINUE)
5038 goto done;
5039
dde7e6d1 5040 /* Decode and fetch the destination operand: register or memory. */
a9945549 5041 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
dde7e6d1 5042
d9092f52 5043 if (ctxt->rip_relative && likely(ctxt->memopp))
5044 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5045 ctxt->memopp->addr.mem.ea + ctxt->_eip);
cb16c348 5046
a430c916 5047done:
5048 if (rc == X86EMUL_PROPAGATE_FAULT)
5049 ctxt->have_exception = true;
1d2887e2 5050 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5051}
5052
5053bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5054{
5055 return ctxt->d & PageTable;
5056}
5057
5058static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5059{
5060 /* The second termination condition only applies to REPE
5061 * and REPNE. Test whether the repeat string operation prefix is
5062 * REPE/REPZ or REPNE/REPNZ and, if so, check the
5063 * corresponding termination condition:
5064 * - if REPE/REPZ and ZF = 0 then done
5065 * - if REPNE/REPNZ and ZF = 1 then done
5066 */
9dac77fa
AK
5067 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5068 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5069 && (((ctxt->rep_prefix == REPE_PREFIX) &&
0efb0440 5070 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
9dac77fa 5071 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
0efb0440 5072 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
3e2f65d5
GN
5073 return true;
5074
5075 return false;
5076}
5077
cbe2c9d3
AK
5078static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5079{
aabba3c6 5080 int rc;
cbe2c9d3 5081
43e51464 5082 kvm_fpu_get();
aabba3c6 5083 rc = asm_safe("fwait");
43e51464 5084 kvm_fpu_put();
cbe2c9d3 5085
aabba3c6 5086 if (unlikely(rc != X86EMUL_CONTINUE))
cbe2c9d3
AK
5087 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5088
5089 return X86EMUL_CONTINUE;
5090}
5091
c0a21c3f 5092static void fetch_possible_mmx_operand(struct operand *op)
cbe2c9d3
AK
5093{
5094 if (op->type == OP_MM)
43e51464 5095 kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
cbe2c9d3
AK
5096}
5097
3009afc6 5098static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
e28bbd44
AK
5099{
5100 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4548f63e 5101
b9fa409b
AK
5102 if (!(ctxt->d & ByteOp))
5103 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4548f63e 5104
1a29b5b7 5105 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
b8c0b6ae 5106 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
1a29b5b7 5107 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
b8c0b6ae 5108 : "c"(ctxt->src2.val));
4548f63e 5109
e28bbd44 5110 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
b8c0b6ae
AK
5111 if (!fop) /* exception is returned in fop variable */
5112 return emulate_de(ctxt);
e28bbd44
AK
5113 return X86EMUL_CONTINUE;
5114}
dd856efa 5115
1498507a
BD
5116void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5117{
73ab4a35
SC
5118 /* Clear fields that are set conditionally but read without a guard. */
5119 ctxt->rip_relative = false;
5120 ctxt->rex_prefix = 0;
5121 ctxt->lock_prefix = 0;
5122 ctxt->rep_prefix = 0;
5123 ctxt->regs_valid = 0;
5124 ctxt->regs_dirty = 0;
1498507a 5125
1498507a
BD
5126 ctxt->io_read.pos = 0;
5127 ctxt->io_read.end = 0;
1498507a
BD
5128 ctxt->mem_read.end = 0;
5129}
5130
7b105ca2 5131int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
8b4caf66 5132{
0225fb50 5133 const struct x86_emulate_ops *ops = ctxt->ops;
1b30eaa8 5134 int rc = X86EMUL_CONTINUE;
9dac77fa 5135 int saved_dst_type = ctxt->dst.type;
6ed071f0 5136 unsigned emul_flags;
8b4caf66 5137
9dac77fa 5138 ctxt->mem_read.pos = 0;
310b5d30 5139
e24186e0
PB
5140 /* LOCK prefix is allowed only with some instructions */
5141 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
35d3d4a1 5142 rc = emulate_ud(ctxt);
1161624f
GN
5143 goto done;
5144 }
5145
e24186e0 5146 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
35d3d4a1 5147 rc = emulate_ud(ctxt);
d380a5e4
GN
5148 goto done;
5149 }
5150
6ed071f0 5151 emul_flags = ctxt->ops->get_hflags(ctxt);
d40a6898
PB
5152 if (unlikely(ctxt->d &
5153 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5154 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5155 (ctxt->d & Undefined)) {
5156 rc = emulate_ud(ctxt);
5157 goto done;
5158 }
1253791d 5159
d40a6898
PB
5160 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5161 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5162 rc = emulate_ud(ctxt);
cbe2c9d3 5163 goto done;
d40a6898 5164 }
cbe2c9d3 5165
d40a6898
PB
5166 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5167 rc = emulate_nm(ctxt);
c4f035c6 5168 goto done;
d40a6898 5169 }
c4f035c6 5170
d40a6898
PB
5171 if (ctxt->d & Mmx) {
5172 rc = flush_pending_x87_faults(ctxt);
5173 if (rc != X86EMUL_CONTINUE)
5174 goto done;
5175 /*
5176 * Now that we know the fpu is exception safe, we can fetch
5177 * operands from it.
5178 */
c0a21c3f
SC
5179 fetch_possible_mmx_operand(&ctxt->src);
5180 fetch_possible_mmx_operand(&ctxt->src2);
d40a6898 5181 if (!(ctxt->d & Mov))
c0a21c3f 5182 fetch_possible_mmx_operand(&ctxt->dst);
d40a6898 5183 }
e92805ac 5184
6ed071f0 5185 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
d40a6898
PB
5186 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5187 X86_ICPT_PRE_EXCEPT);
5188 if (rc != X86EMUL_CONTINUE)
5189 goto done;
5190 }
8ea7d6ae 5191
64a38292
NA
5192 /* Instruction can only be executed in protected mode */
5193 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5194 rc = emulate_ud(ctxt);
5195 goto done;
5196 }
5197
d40a6898
PB
5198 /* Privileged instruction can be executed only in CPL=0 */
5199 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
68efa764
NA
5200 if (ctxt->d & PrivUD)
5201 rc = emulate_ud(ctxt);
5202 else
5203 rc = emulate_gp(ctxt, 0);
d09beabd 5204 goto done;
d40a6898 5205 }
d09beabd 5206
d40a6898 5207 /* Do instruction specific permission checks */
685bbf4a 5208 if (ctxt->d & CheckPerm) {
d40a6898
PB
5209 rc = ctxt->check_perm(ctxt);
5210 if (rc != X86EMUL_CONTINUE)
5211 goto done;
5212 }
5213
6ed071f0 5214 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
d40a6898
PB
5215 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5216 X86_ICPT_POST_EXCEPT);
5217 if (rc != X86EMUL_CONTINUE)
5218 goto done;
5219 }
5220
5221 if (ctxt->rep_prefix && (ctxt->d & String)) {
5222 /* All REP prefixes have the same first termination condition */
5223 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
428e3d08 5224 string_registers_quirk(ctxt);
d40a6898 5225 ctxt->eip = ctxt->_eip;
0efb0440 5226 ctxt->eflags &= ~X86_EFLAGS_RF;
d40a6898
PB
5227 goto done;
5228 }
b9fa9d6b 5229 }
b9fa9d6b
AK
5230 }
5231
9dac77fa
AK
5232 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5233 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5234 ctxt->src.valptr, ctxt->src.bytes);
b60d513c 5235 if (rc != X86EMUL_CONTINUE)
8b4caf66 5236 goto done;
9dac77fa 5237 ctxt->src.orig_val64 = ctxt->src.val64;
8b4caf66
LV
5238 }
5239
9dac77fa
AK
5240 if (ctxt->src2.type == OP_MEM) {
5241 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5242 &ctxt->src2.val, ctxt->src2.bytes);
e35b7b9c
GN
5243 if (rc != X86EMUL_CONTINUE)
5244 goto done;
5245 }
5246
9dac77fa 5247 if ((ctxt->d & DstMask) == ImplicitOps)
8b4caf66
LV
5248 goto special_insn;
5249
5250
9dac77fa 5251 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
69f55cb1 5252 /* optimisation - avoid slow emulated read if Mov */
9dac77fa
AK
5253 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5254 &ctxt->dst.val, ctxt->dst.bytes);
c205fb7d 5255 if (rc != X86EMUL_CONTINUE) {
d44e1212
PB
5256 if (!(ctxt->d & NoWrite) &&
5257 rc == X86EMUL_PROPAGATE_FAULT &&
c205fb7d
NA
5258 ctxt->exception.vector == PF_VECTOR)
5259 ctxt->exception.error_code |= PFERR_WRITE_MASK;
69f55cb1 5260 goto done;
c205fb7d 5261 }
038e51de 5262 }
4ff6f8e6
PB
5263 /* Copy full 64-bit value for CMPXCHG8B. */
5264 ctxt->dst.orig_val64 = ctxt->dst.val64;
038e51de 5265
018a98db
AK
5266special_insn:
5267
6ed071f0 5268 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
9dac77fa 5269 rc = emulator_check_intercept(ctxt, ctxt->intercept,
8a76d7f2 5270 X86_ICPT_POST_MEMACCESS);
c4f035c6
AK
5271 if (rc != X86EMUL_CONTINUE)
5272 goto done;
5273 }
5274
b9a1ecb9 5275 if (ctxt->rep_prefix && (ctxt->d & String))
0efb0440 5276 ctxt->eflags |= X86_EFLAGS_RF;
b9a1ecb9 5277 else
0efb0440 5278 ctxt->eflags &= ~X86_EFLAGS_RF;
4467c3f1 5279
9dac77fa 5280 if (ctxt->execute) {
3009afc6 5281 if (ctxt->d & Fastop)
b78a8552 5282 rc = fastop(ctxt, ctxt->fop);
3009afc6 5283 else
52db3698 5284 rc = ctxt->execute(ctxt);
ef65c889
AK
5285 if (rc != X86EMUL_CONTINUE)
5286 goto done;
5287 goto writeback;
5288 }
5289
1ce19dc1 5290 if (ctxt->opcode_len == 2)
6aa8b732 5291 goto twobyte_insn;
0bc5eedb
BP
5292 else if (ctxt->opcode_len == 3)
5293 goto threebyte_insn;
6aa8b732 5294
9dac77fa 5295 switch (ctxt->b) {
b2833e3c 5296 case 0x70 ... 0x7f: /* jcc (short) */
9dac77fa 5297 if (test_cc(ctxt->b, ctxt->eflags))
234f3ce4 5298 rc = jmp_rel(ctxt, ctxt->src.val);
018a98db 5299 break;
7e0b54b1 5300 case 0x8d: /* lea r16/r32, m */
9dac77fa 5301 ctxt->dst.val = ctxt->src.addr.mem.ea;
7e0b54b1 5302 break;
3d9e77df 5303 case 0x90 ... 0x97: /* nop / xchg reg, rax */
dd856efa 5304 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
a825f5cc
NA
5305 ctxt->dst.type = OP_NONE;
5306 else
5307 rc = em_xchg(ctxt);
e4f973ae 5308 break;
e8b6fa70 5309 case 0x98: /* cbw/cwde/cdqe */
9dac77fa
AK
5310 switch (ctxt->op_bytes) {
5311 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5312 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5313 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
e8b6fa70
WY
5314 }
5315 break;
6e154e56 5316 case 0xcc: /* int3 */
5c5df76b
TY
5317 rc = emulate_int(ctxt, 3);
5318 break;
6e154e56 5319 case 0xcd: /* int n */
9dac77fa 5320 rc = emulate_int(ctxt, ctxt->src.val);
6e154e56
MG
5321 break;
5322 case 0xce: /* into */
0efb0440 5323 if (ctxt->eflags & X86_EFLAGS_OF)
5c5df76b 5324 rc = emulate_int(ctxt, 4);
6e154e56 5325 break;
1a52e051 5326 case 0xe9: /* jmp rel */
db5b0762 5327 case 0xeb: /* jmp rel short */
234f3ce4 5328 rc = jmp_rel(ctxt, ctxt->src.val);
9dac77fa 5329 ctxt->dst.type = OP_NONE; /* Disable writeback. */
1a52e051 5330 break;
111de5d6 5331 case 0xf4: /* hlt */
6c3287f7 5332 ctxt->ops->halt(ctxt);
19fdfa0d 5333 break;
111de5d6
AK
5334 case 0xf5: /* cmc */
5335 /* complement carry flag from eflags reg */
0efb0440 5336 ctxt->eflags ^= X86_EFLAGS_CF;
111de5d6
AK
5337 break;
5338 case 0xf8: /* clc */
0efb0440 5339 ctxt->eflags &= ~X86_EFLAGS_CF;
111de5d6 5340 break;
8744aa9a 5341 case 0xf9: /* stc */
0efb0440 5342 ctxt->eflags |= X86_EFLAGS_CF;
8744aa9a 5343 break;
fb4616f4 5344 case 0xfc: /* cld */
0efb0440 5345 ctxt->eflags &= ~X86_EFLAGS_DF;
fb4616f4
MG
5346 break;
5347 case 0xfd: /* std */
0efb0440 5348 ctxt->eflags |= X86_EFLAGS_DF;
fb4616f4 5349 break;
91269b8f
AK
5350 default:
5351 goto cannot_emulate;
6aa8b732 5352 }
018a98db 5353
7d9ddaed
AK
5354 if (rc != X86EMUL_CONTINUE)
5355 goto done;
5356
018a98db 5357writeback:
fb32b1ed
AK
5358 if (ctxt->d & SrcWrite) {
5359 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5360 rc = writeback(ctxt, &ctxt->src);
5361 if (rc != X86EMUL_CONTINUE)
5362 goto done;
5363 }
ee212297
NA
5364 if (!(ctxt->d & NoWrite)) {
5365 rc = writeback(ctxt, &ctxt->dst);
5366 if (rc != X86EMUL_CONTINUE)
5367 goto done;
5368 }
018a98db 5369
5cd21917
GN
5370 /*
5371 * restore dst type in case the decoding will be reused
5372 * (happens for string instruction )
5373 */
9dac77fa 5374 ctxt->dst.type = saved_dst_type;
5cd21917 5375
9dac77fa 5376 if ((ctxt->d & SrcMask) == SrcSI)
f3bd64c6 5377 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
a682e354 5378
9dac77fa 5379 if ((ctxt->d & DstMask) == DstDI)
f3bd64c6 5380 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
d9271123 5381
9dac77fa 5382 if (ctxt->rep_prefix && (ctxt->d & String)) {
b3356bf0 5383 unsigned int count;
9dac77fa 5384 struct read_cache *r = &ctxt->io_read;
b3356bf0
GN
5385 if ((ctxt->d & SrcMask) == SrcSI)
5386 count = ctxt->src.count;
5387 else
5388 count = ctxt->dst.count;
01485a22 5389 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
3e2f65d5 5390
d2ddd1c4
GN
5391 if (!string_insn_completed(ctxt)) {
5392 /*
5393 * Re-enter guest when pio read ahead buffer is empty
5394 * or, if it is not used, after each 1024 iteration.
5395 */
dd856efa 5396 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
d2ddd1c4
GN
5397 (r->end == 0 || r->end != r->pos)) {
5398 /*
5399 * Reset read cache. Usually happens before
5400 * decode, but since instruction is restarted
5401 * we have to do it here.
5402 */
9dac77fa 5403 ctxt->mem_read.end = 0;
dd856efa 5404 writeback_registers(ctxt);
d2ddd1c4
GN
5405 return EMULATION_RESTART;
5406 }
5407 goto done; /* skip rip writeback */
0fa6ccbd 5408 }
0efb0440 5409 ctxt->eflags &= ~X86_EFLAGS_RF;
5cd21917 5410 }
d2ddd1c4 5411
9dac77fa 5412 ctxt->eip = ctxt->_eip;
fede8076
PB
5413 if (ctxt->mode != X86EMUL_MODE_PROT64)
5414 ctxt->eip = (u32)ctxt->_eip;
018a98db
AK
5415
5416done:
e0ad0b47 5417 if (rc == X86EMUL_PROPAGATE_FAULT) {
49a1431d
SC
5418 if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
5419 return EMULATION_FAILED;
da9cb575 5420 ctxt->have_exception = true;
e0ad0b47 5421 }
775fde86
JR
5422 if (rc == X86EMUL_INTERCEPTED)
5423 return EMULATION_INTERCEPTED;
5424
dd856efa
AK
5425 if (rc == X86EMUL_CONTINUE)
5426 writeback_registers(ctxt);
5427
d2ddd1c4 5428 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
6aa8b732
AK
5429
5430twobyte_insn:
9dac77fa 5431 switch (ctxt->b) {
018a98db 5432 case 0x09: /* wbinvd */
cfb22375 5433 (ctxt->ops->wbinvd)(ctxt);
f5f48ee1
SY
5434 break;
5435 case 0x08: /* invd */
018a98db
AK
5436 case 0x0d: /* GrpP (prefetch) */
5437 case 0x18: /* Grp16 (prefetch/nop) */
103f98ea 5438 case 0x1f: /* nop */
018a98db
AK
5439 break;
5440 case 0x20: /* mov cr, reg */
9dac77fa 5441 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
018a98db 5442 break;
6aa8b732 5443 case 0x21: /* mov from dr to reg */
9dac77fa 5444 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
6aa8b732 5445 break;
6aa8b732 5446 case 0x40 ... 0x4f: /* cmov */
140bad89
NA
5447 if (test_cc(ctxt->b, ctxt->eflags))
5448 ctxt->dst.val = ctxt->src.val;
b91aa14d 5449 else if (ctxt->op_bytes != 4)
9dac77fa 5450 ctxt->dst.type = OP_NONE; /* no writeback */
6aa8b732 5451 break;
b2833e3c 5452 case 0x80 ... 0x8f: /* jnz rel, etc*/
9dac77fa 5453 if (test_cc(ctxt->b, ctxt->eflags))
234f3ce4 5454 rc = jmp_rel(ctxt, ctxt->src.val);
018a98db 5455 break;
ee45b58e 5456 case 0x90 ... 0x9f: /* setcc r/m8 */
9dac77fa 5457 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
ee45b58e 5458 break;
6aa8b732 5459 case 0xb6 ... 0xb7: /* movzx */
9dac77fa 5460 ctxt->dst.bytes = ctxt->op_bytes;
361cad2b 5461 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
9dac77fa 5462 : (u16) ctxt->src.val;
6aa8b732 5463 break;
6aa8b732 5464 case 0xbe ... 0xbf: /* movsx */
9dac77fa 5465 ctxt->dst.bytes = ctxt->op_bytes;
361cad2b 5466 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
9dac77fa 5467 (s16) ctxt->src.val;
6aa8b732 5468 break;
91269b8f
AK
5469 default:
5470 goto cannot_emulate;
6aa8b732 5471 }
7d9ddaed 5472
0bc5eedb
BP
5473threebyte_insn:
5474
7d9ddaed
AK
5475 if (rc != X86EMUL_CONTINUE)
5476 goto done;
5477
6aa8b732
AK
5478 goto writeback;
5479
5480cannot_emulate:
a0c0ab2f 5481 return EMULATION_FAILED;
6aa8b732 5482}
dd856efa
AK
5483
5484void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5485{
5486 invalidate_registers(ctxt);
5487}
5488
5489void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5490{
5491 writeback_registers(ctxt);
5492}
0f89b207
TL
5493
5494bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5495{
5496 if (ctxt->rep_prefix && (ctxt->d & String))
5497 return false;
5498
5499 if (ctxt->d & TwoMemOp)
5500 return false;
5501
5502 return true;
5503}