// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)  /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

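/*
 * For illustration: a decode-table entry such as DstReg | SrcMem | ModRM
 * packs both operand descriptors into the 56-bit flags word; the decoder
 * later recovers each field with shifts of the form
 *
 *	(ctxt->d >> SrcShift) & OpMask	== OpMem
 *	(ctxt->d >> DstShift) & OpMask	== OpReg
 */
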
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

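/*
 * e.g. X4(op) expands to "op, op, op, op"; these repeaters fill runs of
 * identical entries in the opcode tables.
 */
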
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */
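
/*
 * Dispatch sketch: because every size variant is FASTOP_SIZE bytes, the
 * handler for an N-byte operand sits at a fixed offset from the byte
 * variant, roughly em_##op + __ffs(N) * FASTOP_SIZE (so the 4-byte "addl"
 * variant of em_add lives 16 bytes past the "addb" one).
 */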

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

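/*
 * A sketch of how the decoder walks these: a GroupDual entry picks
 * u.gdual->mod3[] when the ModRM mod field is 3 and u.gdual->mod012[]
 * otherwise, both indexed by the ModRM reg field; a gprefix entry selects
 * on the 66/f2/f3 mandatory prefix.
 */
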
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

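/*
 * Usage sketch: a handler that needs to read-modify-write RCX would do
 *
 *	*reg_rmw(ctxt, VCPU_REGS_RCX) -= 1;
 *
 * which pulls the value in through ->read_gpr() once, marks it dirty, and
 * lets writeback_registers() flush it back exactly once on exit.
 */
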
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"ret \n\t" \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op, dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	__FOP_RET(#op)

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 * Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     ".pushsection .fixup, \"ax\"\n" \
		     "3: movl $1, %[_fault]\n" \
		     "   jmp  2b\n" \
		     ".popsection\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

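/*
 * Usage sketch (assuming an FXSAVE area "fx" is in scope):
 *
 *	rc = asm_safe("fxrstor %[fx]", : [fx] "m"(*fx));
 *
 * returns X86EMUL_UNHANDLEABLE instead of oopsing if the insn faults.
 */
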
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

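/*
 * e.g. assign_register(&reg, 0x11223344aabbccdd, 4) leaves reg ==
 * 0x00000000aabbccdd, matching hardware zero-extension of 32-bit writes,
 * while a 2-byte write would preserve the upper 48 bits.
 */
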
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

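/*
 * e.g. with a 16-bit SS (ss.d == 0), stack_mask() is 0xffff and
 * stack_size() is 2, so rsp_increment() below wraps SP without touching
 * the upper bits of RSP.
 */
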
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

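/*
 * e.g. a 16-byte MOVDQA operand (Aligned) must sit on a 16-byte boundary,
 * while MOVDQU (Unaligned) gets alignment 1 and is never faulted here.
 */
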
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	/* 15UL ^ cur_size equals 15 - cur_size, since 0 <= cur_size <= 15 */
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})

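/*
 * Usage sketch: the decoder pulls bytes with, e.g.,
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *
 * relying on "rc" and the "done" label being in scope at the call site.
 */
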
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}

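/*
 * e.g. test_cc(4, flags) lands on the setz stub (em_setcc + 4 * 4), so a
 * set ZF yields 1; condition codes follow the hardware cc encoding, and
 * each stub is 4 bytes thanks to the ".align 4" in FOP_SETCC.
 */
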
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

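/*
 * Worked example: ModRM 0x44 (mod=01 reg=000 rm=100) means a SIB byte and
 * a disp8 follow; SIB 0x24 (scale=0 index=100 base=100) then yields
 * [rsp + disp8], with the index ignored because index == 4 and the
 * default segment switched to SS by adjust_modrm_seg().
 */
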
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

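/*
 * e.g. BT m32, reg with a bit offset of 35: sv is rounded down to 32, the
 * effective address advances by 32 >> 3 = 4 bytes, and the in-word bit
 * offset left in src.val is 35 & 31 = 3.
 */
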
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

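/*
 * Selector layout refresher: bits 15:3 index the table, bit 2 (TI)
 * selects LDT over GDT, bits 1:0 are the RPL; e.g. selector 0x2b maps to
 * GDT index 5 at RPL 3.
 */
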
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}

2356aaeb 1608static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
d1442d85 1609 u16 selector, int seg, u8 cpl,
3dc4bc4f 1610 enum x86_transfer_type transfer,
d1442d85 1611 struct desc_struct *desc)
dde7e6d1 1612{
869be99c 1613 struct desc_struct seg_desc, old_desc;
2356aaeb 1614 u8 dpl, rpl;
dde7e6d1
AK
1615 unsigned err_vec = GP_VECTOR;
1616 u32 err_code = 0;
1617 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
e919464b 1618 ulong desc_addr;
dde7e6d1 1619 int ret;
03ebebeb 1620 u16 dummy;
e37a75a1 1621 u32 base3 = 0;
69f55cb1 1622
0e96f31e 1623 memset(&seg_desc, 0, sizeof(seg_desc));
69f55cb1 1624
f8da94e9
KW
1625 if (ctxt->mode == X86EMUL_MODE_REAL) {
1626 /* set real mode segment descriptor (keep limit etc. for
1627 * unreal mode) */
03ebebeb 1628 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
dde7e6d1 1629 set_desc_base(&seg_desc, selector << 4);
dde7e6d1 1630 goto load;
f8da94e9
KW
1631 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1632 /* VM86 needs a clean new segment descriptor */
1633 set_desc_base(&seg_desc, selector << 4);
1634 set_desc_limit(&seg_desc, 0xffff);
1635 seg_desc.type = 3;
1636 seg_desc.p = 1;
1637 seg_desc.s = 1;
1638 seg_desc.dpl = 3;
1639 goto load;
dde7e6d1
AK
1640 }
1641
79d5b4c3 1642 rpl = selector & 3;
79d5b4c3 1643
dde7e6d1
AK
1644 /* TR should be in GDT only */
1645 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1646 goto exception;
1647
33ab9110
PB
1648 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1649 if (null_selector) {
1650 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1651 goto exception;
1652
1653 if (seg == VCPU_SREG_SS) {
1654 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1655 goto exception;
1656
1657 /*
1658 * ctxt->ops->set_segment expects the CPL to be in
1659 * SS.DPL, so fake an expand-up 32-bit data segment.
1660 */
1661 seg_desc.type = 3;
1662 seg_desc.p = 1;
1663 seg_desc.s = 1;
1664 seg_desc.dpl = cpl;
1665 seg_desc.d = 1;
1666 seg_desc.g = 1;
1667 }
1668
1669 /* Skip all following checks */
dde7e6d1 1670 goto load;
33ab9110 1671 }
dde7e6d1 1672
e919464b 1673 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
dde7e6d1
AK
1674 if (ret != X86EMUL_CONTINUE)
1675 return ret;
1676
1677 err_code = selector & 0xfffc;
3dc4bc4f
NA
1678 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1679 GP_VECTOR;
dde7e6d1 1680
fc058680 1681 /* can't load system descriptor into segment selector */
3dc4bc4f
NA
1682 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1683 if (transfer == X86_TRANSFER_CALL_JMP)
1684 return X86EMUL_UNHANDLEABLE;
dde7e6d1 1685 goto exception;
3dc4bc4f 1686 }
dde7e6d1
AK
1687
1688 if (!seg_desc.p) {
1689 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1690 goto exception;
1691 }
1692
dde7e6d1 1693 dpl = seg_desc.dpl;
dde7e6d1
AK
1694
1695 switch (seg) {
1696 case VCPU_SREG_SS:
1697 /*
1698 * segment is not a writable data segment or segment
1699 * selector's RPL != CPL or segment selector's RPL != CPL
1700 */
1701 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1702 goto exception;
6aa8b732 1703 break;
dde7e6d1
AK
1704 case VCPU_SREG_CS:
1705 if (!(seg_desc.type & 8))
1706 goto exception;
1707
1708 if (seg_desc.type & 4) {
1709 /* conforming */
1710 if (dpl > cpl)
1711 goto exception;
1712 } else {
1713 /* nonconforming */
1714 if (rpl > cpl || dpl != cpl)
1715 goto exception;
1716 }
040c8dc8
NA
1717 /* in long-mode d/b must be clear if l is set */
1718 if (seg_desc.d && seg_desc.l) {
1719 u64 efer = 0;
1720
1721 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1722 if (efer & EFER_LMA)
1723 goto exception;
1724 }
1725
dde7e6d1
AK
1726 /* CS(RPL) <- CPL */
1727 selector = (selector & 0xfffc) | cpl;
6aa8b732 1728 break;
dde7e6d1
AK
1729 case VCPU_SREG_TR:
1730 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1731 goto exception;
869be99c
AK
1732 old_desc = seg_desc;
1733 seg_desc.type |= 2; /* busy */
1734 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1735 sizeof(seg_desc), &ctxt->exception);
1736 if (ret != X86EMUL_CONTINUE)
1737 return ret;
dde7e6d1
AK
1738 break;
1739 case VCPU_SREG_LDTR:
1740 if (seg_desc.s || seg_desc.type != 2)
1741 goto exception;
1742 break;
1743 default: /* DS, ES, FS, or GS */
4e62417b 1744 /*
dde7e6d1
AK
1745 * segment is not a data or readable code segment or
1746 * ((segment is a data or nonconforming code segment)
1747 * and (both RPL and CPL > DPL))
4e62417b 1748 */
dde7e6d1
AK
1749 if ((seg_desc.type & 0xa) == 0x8 ||
1750 (((seg_desc.type & 0xc) != 0xc) &&
1751 (rpl > dpl && cpl > dpl)))
1752 goto exception;
6aa8b732 1753 break;
dde7e6d1
AK
1754 }
1755
1756 if (seg_desc.s) {
1757 /* mark segment as accessed */
e2cefa74
NA
1758 if (!(seg_desc.type & 1)) {
1759 seg_desc.type |= 1;
1760 ret = write_segment_descriptor(ctxt, selector,
1761 &seg_desc);
1762 if (ret != X86EMUL_CONTINUE)
1763 return ret;
1764 }
e37a75a1 1765 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
79367a65 1766 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
e37a75a1
NA
1767 if (ret != X86EMUL_CONTINUE)
1768 return ret;
fd8cb433
YZ
1769 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1770 ((u64)base3 << 32), ctxt))
9a9abf6b 1771 return emulate_gp(ctxt, 0);
dde7e6d1
AK
1772 }
1773load:
e37a75a1 1774 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
d1442d85
NA
1775 if (desc)
1776 *desc = seg_desc;
dde7e6d1
AK
1777 return X86EMUL_CONTINUE;
1778exception:
592f0858 1779 return emulate_exception(ctxt, err_vec, err_code, true);
dde7e6d1
AK
1780}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
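
/*
 * Note on the OP_MEM case above: a LOCK-prefixed read-modify-write is
 * written back through segmented_cmpxchg() against op->orig_val, so a
 * concurrent guest store to the same location is detected instead of
 * being silently overwritten; unlocked stores take the plain
 * segmented_write() path.
 */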

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
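
/*
 * Worked example of the masking above: POPF in protected mode at CPL 1
 * with IOPL 0 keeps both IOPL (cpl != 0) and IF (cpl > iopl) from the
 * old EFLAGS; only the arithmetic/system flags in the base change_mask
 * are taken from the popped value.
 */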

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}
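
/*
 * ENTER with a nonzero nesting level would additionally have to copy
 * frame pointers from the previous frame; that rare case is punted back
 * as X86EMUL_UNHANDLEABLE above rather than emulated, so only the common
 * "enter N, 0" form (push rBP, rBP = rSP, rSP -= N) is handled.
 */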

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
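
/*
 * Real-mode IVT layout used above: each vector is a 4-byte entry at
 * IDT base + vector * 4, with IP in the low word and CS in the high
 * word, hence the reads at (irq << 2) and (irq << 2) + 2 after FLAGS,
 * CS and IP have been pushed.
 */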

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}
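
/*
 * CMPXCHG8B semantics as implemented above: compare EDX:EAX against the
 * 64-bit destination; on mismatch, load EDX:EAX from memory and clear
 * ZF; on a match, store ECX:EBX and set ZF. The 16-byte CMPXCHG16B form
 * (dst.bytes == 16) is deliberately not handled here.
 */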

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}
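
/*
 * The "failure" arm above deliberately rewrites the destination with the
 * value just read: CMPXCHG performs a write cycle on the destination
 * even when the comparison fails, so re-storing the old value keeps the
 * emulated memory access pattern faithful to hardware.
 */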

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	return edx & bit(X86_FEATURE_LM);
#else
	return false;
#endif
}

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}
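
/*
 * The flags word decoded above follows the layout of a segment
 * descriptor's high dword: G at bit 23, D/B at 22, L at 21, AVL at 20,
 * P at 15, DPL at bits 14:13, S at 12 and the 4-bit type at 11:8, which
 * is how the SMM state-save area encodes segment attributes.
 */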

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,  GET_SMSTATE(u32, smstate, offset + 8));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smstate, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
	base3 =                   GET_SMSTATE(u32, smstate, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}
#endif

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}

	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr3, cr4;
	int i;

	cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
	cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
	ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smstate, 0x7fcc);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smstate, 0x7fc8);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}
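
/*
 * The 0x7exx/0x7fxx constants above and below are offsets in the SMM
 * state-save map. em_rsm() reads only the 512-byte save area at
 * smbase + 0xfe00 into a local buffer, and GET_SMSTATE() maps each
 * architectural offset to a position within that buffer.
 */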

#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr3, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);

	ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u32, smstate, 0x7f68);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smstate, 0x7f60);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
	cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
	cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
	val =                       GET_SMSTATE(u64, smstate, 0x7ed0);
	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

	selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
	base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
	dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
	base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
	dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}
#endif

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	char buf[512];
	u64 smbase;
	int ret;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	smbase = ctxt->ops->get_smbase(ctxt);

	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
	if (ret != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PCIDE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	if (emulator_has_longmode(ctxt)) {
		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PAE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

		/* And finally go back to 32-bit mode.  */
		efer = 0;
		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
	}

	/*
	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
	 * vCPU state (e.g. enter guest mode) before loading state from the
	 * SMM state-save area.
	 */
	if (ctxt->ops->pre_leave_smm(ctxt, buf))
		return X86EMUL_UNHANDLEABLE;

#ifdef CONFIG_X86_64
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, buf);
	else
#endif
		ret = rsm_load_state_32(ctxt, buf);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	ctxt->ops->post_leave_smm(ctxt);

	return X86EMUL_CONTINUE;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall is always enabled in long mode, so only become
	 * vendor specific (via cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode,
	 * so a 32-bit compat app running under a 64-bit guest will #UD.
	 * This behaviour could be fixed by emulating the AMD response,
	 * but AMD CPUs cannot be made to behave like Intel ones, so
	 * mimic Intel here and refuse.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* Hygon ("HygonGenuine") */
	if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
		return true;

	/*
	 * default: (not Intel, not AMD, not Hygon), apply Intel's
	 * stricter rules...
	 */
	return false;
}

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
}
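
/*
 * MSR_STAR layout relied on above: bits 47:32 hold the SYSCALL CS
 * selector, with SS implied at that value + 8, which is why the code
 * shifts the MSR right by 32 before extracting the selectors. The new
 * RIP comes from LSTAR (64-bit mode) or CSTAR (compat mode), and
 * MSR_SYSCALL_MASK gives the RFLAGS bits to clear on entry.
 */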

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (emul_is_noncanonical_address(rcx, ctxt) ||
		    emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}
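
/*
 * SYSEXIT selector math above follows the SDM: relative to
 * IA32_SYSENTER_CS, the user CS is +16 and SS is +24 for a 32-bit
 * return, while a 64-bit return uses CS = +32 with SS = CS + 8; in
 * both cases the RPL is forced to 3 via SEGMENT_RPL_MASK.
 */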

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

#define VMWARE_PORT_VMPORT	(0x5658)
#define VMWARE_PORT_VMRPC	(0x5659)

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	/*
	 * VMware allows access to these ports even if denied
	 * by TSS I/O permission bitmap.  Mimic behavior.
	 */
	if (enable_vmware_backdoor &&
	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
		return true;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
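
/*
 * The check above walks the TSS I/O permission bitmap: the TSS must be
 * at least 104 bytes (scaled limit >= 103), the 16-bit bitmap offset
 * lives at byte 102 of the TSS, and an access of 'len' bytes at 'port'
 * is allowed only if all 'len' consecutive bits starting at that port
 * are clear.
 */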

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite a strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
3097}
3098
3099static int task_switch_16(struct x86_emulate_ctxt *ctxt,
38ba30ba
GN
3100 u16 tss_selector, u16 old_tss_sel,
3101 ulong old_tss_base, struct desc_struct *new_desc)
3102{
3103 struct tss_segment_16 tss_seg;
3104 int ret;
bcc55cba 3105 u32 new_tss_base = get_desc_base(new_desc);
38ba30ba 3106
0e96f31e 3107 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 3108 if (ret != X86EMUL_CONTINUE)
38ba30ba 3109 return ret;
38ba30ba 3110
7b105ca2 3111 save_state_to_tss16(ctxt, &tss_seg);
38ba30ba 3112
0e96f31e 3113 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 3114 if (ret != X86EMUL_CONTINUE)
38ba30ba 3115 return ret;
38ba30ba 3116
0e96f31e 3117 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 3118 if (ret != X86EMUL_CONTINUE)
38ba30ba 3119 return ret;
38ba30ba
GN
3120
3121 if (old_tss_sel != 0xffff) {
3122 tss_seg.prev_task_link = old_tss_sel;
3123
79367a65
PB
3124 ret = linear_write_system(ctxt, new_tss_base,
3125 &tss_seg.prev_task_link,
0e96f31e 3126 sizeof(tss_seg.prev_task_link));
db297e3d 3127 if (ret != X86EMUL_CONTINUE)
38ba30ba 3128 return ret;
38ba30ba
GN
3129 }
3130
7b105ca2 3131 return load_state_from_tss16(ctxt, &tss_seg);
38ba30ba
GN
3132}
3133
3134static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
38ba30ba
GN
3135 struct tss_segment_32 *tss)
3136{
5c7411e2 3137 /* CR3 and ldt selector are not saved intentionally */
9dac77fa 3138 tss->eip = ctxt->_eip;
38ba30ba 3139 tss->eflags = ctxt->eflags;
dd856efa
AK
3140 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3141 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3142 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3143 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3144 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3145 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3146 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3147 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
38ba30ba 3148
1aa36616
AK
3149 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3150 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3151 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3152 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3153 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3154 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
38ba30ba
GN
3155}
3156
3157static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
38ba30ba
GN
3158 struct tss_segment_32 *tss)
3159{
38ba30ba 3160 int ret;
2356aaeb 3161 u8 cpl;
38ba30ba 3162
7b105ca2 3163 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
35d3d4a1 3164 return emulate_gp(ctxt, 0);
9dac77fa 3165 ctxt->_eip = tss->eip;
38ba30ba 3166 ctxt->eflags = tss->eflags | 2;
4cee4798
KW
3167
3168 /* General purpose registers */
dd856efa
AK
3169 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3170 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3171 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3172 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3173 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3174 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3175 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3176 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
38ba30ba
GN
3177
3178 /*
3179 * SDM says that segment selectors are loaded before segment
2356aaeb
PB
3180 * descriptors. This is important because CPL checks will
3181 * use CS.RPL.
38ba30ba 3182 */
1aa36616
AK
3183 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3184 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3185 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3186 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3187 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3188 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3189 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
38ba30ba 3190
4cee4798
KW
3191 /*
3192 * If we're switching between Protected Mode and VM86, we need to make
3193 * sure to update the mode before loading the segment descriptors so
3194 * that the selectors are interpreted correctly.
4cee4798 3195 */
2356aaeb 3196 if (ctxt->eflags & X86_EFLAGS_VM) {
4cee4798 3197 ctxt->mode = X86EMUL_MODE_VM86;
2356aaeb
PB
3198 cpl = 3;
3199 } else {
4cee4798 3200 ctxt->mode = X86EMUL_MODE_PROT32;
2356aaeb
PB
3201 cpl = tss->cs & 3;
3202 }
4cee4798 3203
38ba30ba
GN
3204 /*
3205 * Now load segment descriptors. If fault happenes at this stage
3206 * it is handled in a context of new task
3207 */
d1442d85 3208 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3dc4bc4f 3209 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
3210 if (ret != X86EMUL_CONTINUE)
3211 return ret;
d1442d85 3212 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3dc4bc4f 3213 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
3214 if (ret != X86EMUL_CONTINUE)
3215 return ret;
d1442d85 3216 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3dc4bc4f 3217 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
3218 if (ret != X86EMUL_CONTINUE)
3219 return ret;
d1442d85 3220 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3dc4bc4f 3221 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
3222 if (ret != X86EMUL_CONTINUE)
3223 return ret;
d1442d85 3224 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3dc4bc4f 3225 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
3226 if (ret != X86EMUL_CONTINUE)
3227 return ret;
d1442d85 3228 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3dc4bc4f 3229 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba
GN
3230 if (ret != X86EMUL_CONTINUE)
3231 return ret;
d1442d85 3232 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3dc4bc4f 3233 X86_TRANSFER_TASK_SWITCH, NULL);
38ba30ba 3234
2f729b10 3235 return ret;
38ba30ba
GN
3236}
3237
3238static int task_switch_32(struct x86_emulate_ctxt *ctxt,
38ba30ba
GN
3239 u16 tss_selector, u16 old_tss_sel,
3240 ulong old_tss_base, struct desc_struct *new_desc)
3241{
3242 struct tss_segment_32 tss_seg;
3243 int ret;
bcc55cba 3244 u32 new_tss_base = get_desc_base(new_desc);
5c7411e2
NA
3245 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3246 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
38ba30ba 3247
0e96f31e 3248 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 3249 if (ret != X86EMUL_CONTINUE)
38ba30ba 3250 return ret;
38ba30ba 3251
7b105ca2 3252 save_state_to_tss32(ctxt, &tss_seg);
38ba30ba 3253
5c7411e2 3254 /* Only GP registers and segment selectors are saved */
79367a65
PB
3255 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3256 ldt_sel_offset - eip_offset);
db297e3d 3257 if (ret != X86EMUL_CONTINUE)
38ba30ba 3258 return ret;
38ba30ba 3259
0e96f31e 3260 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
db297e3d 3261 if (ret != X86EMUL_CONTINUE)
38ba30ba 3262 return ret;
38ba30ba
GN
3263
3264 if (old_tss_sel != 0xffff) {
3265 tss_seg.prev_task_link = old_tss_sel;
3266
79367a65
PB
3267 ret = linear_write_system(ctxt, new_tss_base,
3268 &tss_seg.prev_task_link,
0e96f31e 3269 sizeof(tss_seg.prev_task_link));
db297e3d 3270 if (ret != X86EMUL_CONTINUE)
38ba30ba 3271 return ret;
38ba30ba
GN
3272 }
3273
7b105ca2 3274 return load_state_from_tss32(ctxt, &tss_seg);
38ba30ba
GN
3275}
3276
3277static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
7f3d35fd 3278 u16 tss_selector, int idt_index, int reason,
e269fb21 3279 bool has_error_code, u32 error_code)
38ba30ba 3280{
0225fb50 3281 const struct x86_emulate_ops *ops = ctxt->ops;
38ba30ba
GN
3282 struct desc_struct curr_tss_desc, next_tss_desc;
3283 int ret;
1aa36616 3284 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
38ba30ba 3285 ulong old_tss_base =
4bff1e86 3286 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
ceffb459 3287 u32 desc_limit;
3db176d5 3288 ulong desc_addr, dr7;
38ba30ba
GN
3289
3290 /* FIXME: old_tss_base == ~0 ? */
3291
e919464b 3292 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
38ba30ba
GN
3293 if (ret != X86EMUL_CONTINUE)
3294 return ret;
e919464b 3295 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
38ba30ba
GN
3296 if (ret != X86EMUL_CONTINUE)
3297 return ret;
3298
3299 /* FIXME: check that next_tss_desc is tss */
3300
7f3d35fd
KW
3301 /*
3302 * Check privileges. The three cases are task switch caused by...
3303 *
3304 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3305 * 2. Exception/IRQ/iret: No check is performed
2c2ca2d1
NA
3306 * 3. jmp/call to TSS/task-gate: No check is performed since the
3307 * hardware checks it before exiting.
7f3d35fd
KW
3308 */
3309 if (reason == TASK_SWITCH_GATE) {
3310 if (idt_index != -1) {
3311 /* Software interrupts */
3312 struct desc_struct task_gate_desc;
3313 int dpl;
3314
3315 ret = read_interrupt_descriptor(ctxt, idt_index,
3316 &task_gate_desc);
3317 if (ret != X86EMUL_CONTINUE)
3318 return ret;
3319
3320 dpl = task_gate_desc.dpl;
3321 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3322 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3323 }
3324 }
3325
3326 desc_limit = desc_limit_scaled(&next_tss_desc);
3327 if (!next_tss_desc.p ||
3328 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3329 desc_limit < 0x2b)) {
3330 return emulate_ts(ctxt, tss_selector & 0xfffc);
3331 }
3332
3333 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3334 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3335 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3336 }
3337
3338 if (reason == TASK_SWITCH_IRET)
3339 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3340
3341 /* Set the back link to the previous task only if the NT bit is set
3342 in eflags; note that old_tss_sel is not used after this point */
3343 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3344 old_tss_sel = 0xffff;
3345
3346 if (next_tss_desc.type & 8)
3347 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3348 old_tss_base, &next_tss_desc);
3349 else
3350 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3351 old_tss_base, &next_tss_desc);
3352 if (ret != X86EMUL_CONTINUE)
3353 return ret;
3354
3355 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3356 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3357
3358 if (reason != TASK_SWITCH_IRET) {
3359 next_tss_desc.type |= (1 << 1); /* set busy flag */
3360 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3361 }
3362
3363 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3364 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3365
3366 if (has_error_code) {
3367 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3368 ctxt->lock_prefix = 0;
3369 ctxt->src.val = (unsigned long) error_code;
3370 ret = em_push(ctxt);
3371 }
3372
3373 ops->get_dr(ctxt, 7, &dr7);
3374 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3375
3376 return ret;
3377}
3378
3379int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3380 u16 tss_selector, int idt_index, int reason,
3381 bool has_error_code, u32 error_code)
3382 {
3383 int rc;
3384
3385 invalidate_registers(ctxt);
3386 ctxt->_eip = ctxt->eip;
3387 ctxt->dst.type = OP_NONE;
3388
3389 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3390 has_error_code, error_code);
3391
3392 if (rc == X86EMUL_CONTINUE) {
3393 ctxt->eip = ctxt->_eip;
3394 writeback_registers(ctxt);
3395 }
3396
3397 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3398}
3399
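/*
 * String instructions advance SI/DI by the operand size, moving
 * backwards when EFLAGS.DF is set; op->count lets a REP-batched
 * operation advance several elements at once.
 */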
3400static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3401 struct operand *op)
3402 {
3403 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3404
3405 register_address_increment(ctxt, reg, df * op->bytes);
3406 op->addr.mem.ea = register_address(ctxt, reg);
3407}
3408
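/*
 * DAS (decimal adjust AL after subtraction), per the SDM: if the low
 * nibble of AL is above 9 or AF is set, 6 is subtracted from AL and AF
 * is set; if the original AL was above 0x99 or CF was set, 0x60 is also
 * subtracted and CF is set. The (al >= 250) test below catches the
 * unsigned wrap-around, i.e. the borrow out of the "al -= 6" step.
 */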
3409static int em_das(struct x86_emulate_ctxt *ctxt)
3410{
3411 u8 al, old_al;
3412 bool af, cf, old_cf;
3413
3414 cf = ctxt->eflags & X86_EFLAGS_CF;
3415 al = ctxt->dst.val;
3416
3417 old_al = al;
3418 old_cf = cf;
3419 cf = false;
3420 af = ctxt->eflags & X86_EFLAGS_AF;
3421 if ((al & 0x0f) > 9 || af) {
3422 al -= 6;
3423 cf = old_cf | (al >= 250);
3424 af = true;
3425 } else {
3426 af = false;
3427 }
3428 if (old_al > 0x99 || old_cf) {
3429 al -= 0x60;
3430 cf = true;
3431 }
3432
3433 ctxt->dst.val = al;
3434 /* Set PF, ZF, SF */
3435 ctxt->src.type = OP_IMM;
3436 ctxt->src.val = 0;
3437 ctxt->src.bytes = 1;
3438 fastop(ctxt, em_or);
3439 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3440 if (cf)
3441 ctxt->eflags |= X86_EFLAGS_CF;
3442 if (af)
3443 ctxt->eflags |= X86_EFLAGS_AF;
3444 return X86EMUL_CONTINUE;
3445}
3446
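/*
 * AAM divides AL by the immediate (10 unless overridden), leaving the
 * quotient in AH and the remainder in AL; an immediate of zero must
 * raise #DE, hence the explicit emulate_de() check below.
 */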
3447static int em_aam(struct x86_emulate_ctxt *ctxt)
3448{
3449 u8 al, ah;
3450
3451 if (ctxt->src.val == 0)
3452 return emulate_de(ctxt);
3453
3454 al = ctxt->dst.val & 0xff;
3455 ah = al / ctxt->src.val;
3456 al %= ctxt->src.val;
3457
3458 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3459
3460 /* Set PF, ZF, SF */
3461 ctxt->src.type = OP_IMM;
3462 ctxt->src.val = 0;
3463 ctxt->src.bytes = 1;
3464 fastop(ctxt, em_or);
3465
3466 return X86EMUL_CONTINUE;
3467}
3468
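/*
 * AAD is the inverse of AAM: it folds AH back into AL as
 * AL = (AL + AH * imm8) & 0xff and clears AH, which the masking of
 * dst.val below achieves in one step.
 */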
3469static int em_aad(struct x86_emulate_ctxt *ctxt)
3470{
3471 u8 al = ctxt->dst.val & 0xff;
3472 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3473
3474 al = (al + (ah * ctxt->src.val)) & 0xff;
3475
3476 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3477
3478 /* Set PF, ZF, SF */
3479 ctxt->src.type = OP_IMM;
3480 ctxt->src.val = 0;
3481 ctxt->src.bytes = 1;
3482 fastop(ctxt, em_or);
3483
3484 return X86EMUL_CONTINUE;
3485}
3486
3487static int em_call(struct x86_emulate_ctxt *ctxt)
3488{
3489 int rc;
3490 long rel = ctxt->src.val;
3491
3492 ctxt->src.val = (unsigned long)ctxt->_eip;
3493 rc = jmp_rel(ctxt, rel);
3494 if (rc != X86EMUL_CONTINUE)
3495 return rc;
3496 return em_push(ctxt);
3497}
3498
3499static int em_call_far(struct x86_emulate_ctxt *ctxt)
3500{
3501 u16 sel, old_cs;
3502 ulong old_eip;
3503 int rc;
3504 struct desc_struct old_desc, new_desc;
3505 const struct x86_emulate_ops *ops = ctxt->ops;
3506 int cpl = ctxt->ops->cpl(ctxt);
3507 enum x86emul_mode prev_mode = ctxt->mode;
3508
3509 old_eip = ctxt->_eip;
3510 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3511
3512 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3513 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3514 X86_TRANSFER_CALL_JMP, &new_desc);
3515 if (rc != X86EMUL_CONTINUE)
3516 return rc;
3517
3518 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3519 if (rc != X86EMUL_CONTINUE)
3520 goto fail;
3521
3522 ctxt->src.val = old_cs;
3523 rc = em_push(ctxt);
3524 if (rc != X86EMUL_CONTINUE)
3525 goto fail;
3526
3527 ctxt->src.val = old_eip;
3528 rc = em_push(ctxt);
3529 /* If we failed, we tainted the memory, but at the very least we
3530 should restore cs */
3531 if (rc != X86EMUL_CONTINUE) {
3532 pr_warn_once("faulting far call emulation tainted memory\n");
3533 goto fail;
3534 }
3535 return rc;
3536fail:
3537 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3538 ctxt->mode = prev_mode;
3539 return rc;
3540
3541}
3542
3543static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3544{
3545 int rc;
3546 unsigned long eip;
3547
3548 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3549 if (rc != X86EMUL_CONTINUE)
3550 return rc;
3551 rc = assign_eip_near(ctxt, eip);
3552 if (rc != X86EMUL_CONTINUE)
3553 return rc;
3554 rsp_increment(ctxt, ctxt->src.val);
3555 return X86EMUL_CONTINUE;
3556}
3557
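/*
 * XCHG with a memory operand is implicitly locked. The emulation writes
 * the register half immediately and forces the LOCK prefix on so that
 * the normal writeback path stores the memory half atomically.
 */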
3558static int em_xchg(struct x86_emulate_ctxt *ctxt)
3559{
3560 /* Write back the register source. */
3561 ctxt->src.val = ctxt->dst.val;
3562 write_register_operand(&ctxt->src);
3563
3564 /* Write back the memory destination with implicit LOCK prefix. */
3565 ctxt->dst.val = ctxt->src.orig_val;
3566 ctxt->lock_prefix = 1;
3567 return X86EMUL_CONTINUE;
3568}
3569
3570static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3571{
3572 ctxt->dst.val = ctxt->src2.val;
3573 return fastop(ctxt, em_imul);
3574}
3575
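/*
 * CWD/CDQ/CQO sign-extend the accumulator into DX/EDX/RDX. The
 * expression below broadcasts the sign bit of the source into every bit
 * of the destination: (val >> (bits - 1)) is 0 or 1, and ~(x - 1)
 * yields all-zeroes or all-ones respectively.
 */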
3576static int em_cwd(struct x86_emulate_ctxt *ctxt)
3577{
3578 ctxt->dst.type = OP_REG;
3579 ctxt->dst.bytes = ctxt->src.bytes;
3580 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3581 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3582
3583 return X86EMUL_CONTINUE;
3584}
3585
3586static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3587{
3588 u64 tsc_aux = 0;
3589
3590 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3591 return emulate_gp(ctxt, 0);
3592 ctxt->dst.val = tsc_aux;
3593 return X86EMUL_CONTINUE;
3594}
3595
3596static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3597{
3598 u64 tsc = 0;
3599
3600 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3601 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3602 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3603 return X86EMUL_CONTINUE;
3604}
3605
3606static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3607{
3608 u64 pmc;
3609
3610 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3611 return emulate_gp(ctxt, 0);
3612 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3613 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3614 return X86EMUL_CONTINUE;
3615}
3616
3617static int em_mov(struct x86_emulate_ctxt *ctxt)
3618{
3619 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3620 return X86EMUL_CONTINUE;
3621}
3622
3623#define FFL(x) bit(X86_FEATURE_##x)
3624
3625static int em_movbe(struct x86_emulate_ctxt *ctxt)
3626{
3627 u32 ebx, ecx, edx, eax = 1;
3628 u16 tmp;
3629
3630 /*
3631 * Check MOVBE is set in the guest-visible CPUID leaf.
3632 */
3633 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3634 if (!(ecx & FFL(MOVBE)))
3635 return emulate_ud(ctxt);
3636
3637 switch (ctxt->op_bytes) {
3638 case 2:
3639 /*
3640 * From MOVBE definition: "...When the operand size is 16 bits,
3641 * the upper word of the destination register remains unchanged
3642 * ..."
3643 *
3644 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3645 * rules so we have to do the operation almost by hand.
3646 */
3647 tmp = (u16)ctxt->src.val;
3648 ctxt->dst.val &= ~0xffffUL;
3649 ctxt->dst.val |= (unsigned long)swab16(tmp);
3650 break;
3651 case 4:
3652 ctxt->dst.val = swab32((u32)ctxt->src.val);
3653 break;
3654 case 8:
3655 ctxt->dst.val = swab64(ctxt->src.val);
3656 break;
3657 default:
3658 BUG();
3659 }
3660 return X86EMUL_CONTINUE;
3661}
3662
3663static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3664{
3665 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3666 return emulate_gp(ctxt, 0);
3667
3668 /* Disable writeback. */
3669 ctxt->dst.type = OP_NONE;
3670 return X86EMUL_CONTINUE;
3671}
3672
3673static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3674{
3675 unsigned long val;
3676
3677 if (ctxt->mode == X86EMUL_MODE_PROT64)
3678 val = ctxt->src.val & ~0ULL;
3679 else
3680 val = ctxt->src.val & ~0U;
3681
3682 /* #UD condition is already handled. */
3683 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3684 return emulate_gp(ctxt, 0);
3685
3686 /* Disable writeback. */
3687 ctxt->dst.type = OP_NONE;
3688 return X86EMUL_CONTINUE;
3689}
3690
3691static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3692{
3693 u64 msr_data;
3694
3695 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3696 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3697 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3698 return emulate_gp(ctxt, 0);
3699
3700 return X86EMUL_CONTINUE;
3701}
3702
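/*
 * RDMSR is the mirror image of em_wrmsr() above: the 64-bit MSR value
 * is returned split across EDX:EAX, with the high half in EDX.
 */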
3703static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3704{
3705 u64 msr_data;
3706
3707 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3708 return emulate_gp(ctxt, 0);
3709
3710 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3711 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3712 return X86EMUL_CONTINUE;
3713}
3714
3715 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3716 {
3717 if (segment > VCPU_SREG_GS &&
3718 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3719 ctxt->ops->cpl(ctxt) > 0)
3720 return emulate_gp(ctxt, 0);
3721
3722 ctxt->dst.val = get_segment_selector(ctxt, segment);
3723 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3724 ctxt->dst.bytes = 2;
3725 return X86EMUL_CONTINUE;
3726}
3727
3728static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3729{
3730 if (ctxt->modrm_reg > VCPU_SREG_GS)
3731 return emulate_ud(ctxt);
3732
3733 return em_store_sreg(ctxt, ctxt->modrm_reg);
3734}
3735
3736static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3737{
3738 u16 sel = ctxt->src.val;
3739
3740 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3741 return emulate_ud(ctxt);
3742
3743 if (ctxt->modrm_reg == VCPU_SREG_SS)
3744 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3745
3746 /* Disable writeback. */
3747 ctxt->dst.type = OP_NONE;
3748 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3749}
3750
3751static int em_sldt(struct x86_emulate_ctxt *ctxt)
3752{
3753 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3754}
3755
3756static int em_lldt(struct x86_emulate_ctxt *ctxt)
3757{
3758 u16 sel = ctxt->src.val;
3759
3760 /* Disable writeback. */
3761 ctxt->dst.type = OP_NONE;
3762 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3763}
3764
3765static int em_str(struct x86_emulate_ctxt *ctxt)
3766{
3767 return em_store_sreg(ctxt, VCPU_SREG_TR);
3768}
3769
3770static int em_ltr(struct x86_emulate_ctxt *ctxt)
3771{
3772 u16 sel = ctxt->src.val;
3773
3774 /* Disable writeback. */
3775 ctxt->dst.type = OP_NONE;
3776 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3777}
3778
3779static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3780{
3781 int rc;
3782 ulong linear;
3783
3784 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3785 if (rc == X86EMUL_CONTINUE)
3786 ctxt->ops->invlpg(ctxt, linear);
3787 /* Disable writeback. */
3788 ctxt->dst.type = OP_NONE;
3789 return X86EMUL_CONTINUE;
3790}
3791
3792static int em_clts(struct x86_emulate_ctxt *ctxt)
3793{
3794 ulong cr0;
3795
3796 cr0 = ctxt->ops->get_cr(ctxt, 0);
3797 cr0 &= ~X86_CR0_TS;
3798 ctxt->ops->set_cr(ctxt, 0, cr0);
3799 return X86EMUL_CONTINUE;
3800}
3801
3802 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3803 {
3804 int rc = ctxt->ops->fix_hypercall(ctxt);
3805
3806 if (rc != X86EMUL_CONTINUE)
3807 return rc;
3808
3809 /* Let the processor re-execute the fixed hypercall */
3810 ctxt->_eip = ctxt->eip;
3811 /* Disable writeback. */
3812 ctxt->dst.type = OP_NONE;
3813 return X86EMUL_CONTINUE;
3814}
3815
3816static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3817 void (*get)(struct x86_emulate_ctxt *ctxt,
3818 struct desc_ptr *ptr))
3819{
3820 struct desc_ptr desc_ptr;
3821
3822 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3823 ctxt->ops->cpl(ctxt) > 0)
3824 return emulate_gp(ctxt, 0);
3825
3826 if (ctxt->mode == X86EMUL_MODE_PROT64)
3827 ctxt->op_bytes = 8;
3828 get(ctxt, &desc_ptr);
3829 if (ctxt->op_bytes == 2) {
3830 ctxt->op_bytes = 4;
3831 desc_ptr.address &= 0x00ffffff;
3832 }
3833 /* Disable writeback. */
3834 ctxt->dst.type = OP_NONE;
3835 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3836 &desc_ptr, 2 + ctxt->op_bytes);
3837}
3838
3839static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3840{
3841 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3842}
3843
3844static int em_sidt(struct x86_emulate_ctxt *ctxt)
3845{
3846 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3847}
3848
3849 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3850 {
3851 struct desc_ptr desc_ptr;
3852 int rc;
3853
3854 if (ctxt->mode == X86EMUL_MODE_PROT64)
3855 ctxt->op_bytes = 8;
3856 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3857 &desc_ptr.size, &desc_ptr.address,
3858 ctxt->op_bytes);
3859 if (rc != X86EMUL_CONTINUE)
3860 return rc;
3861 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3862 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3863 return emulate_gp(ctxt, 0);
3864 if (lgdt)
3865 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3866 else
3867 ctxt->ops->set_idt(ctxt, &desc_ptr);
3868 /* Disable writeback. */
3869 ctxt->dst.type = OP_NONE;
3870 return X86EMUL_CONTINUE;
3871}
3872
3873static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3874{
3875 return em_lgdt_lidt(ctxt, true);
3876}
3877
3878static int em_lidt(struct x86_emulate_ctxt *ctxt)
3879{
3880 return em_lgdt_lidt(ctxt, false);
3881}
3882
3883static int em_smsw(struct x86_emulate_ctxt *ctxt)
3884{
3885 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3886 ctxt->ops->cpl(ctxt) > 0)
3887 return emulate_gp(ctxt, 0);
3888
3889 if (ctxt->dst.type == OP_MEM)
3890 ctxt->dst.bytes = 2;
3891 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3892 return X86EMUL_CONTINUE;
3893}
3894
3895static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3896{
26d05cc7 3897 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
9dac77fa
AK
3898 | (ctxt->src.val & 0x0f));
3899 ctxt->dst.type = OP_NONE;
26d05cc7
AK
3900 return X86EMUL_CONTINUE;
3901}
3902
3903static int em_loop(struct x86_emulate_ctxt *ctxt)
3904{
3905 int rc = X86EMUL_CONTINUE;
3906
3907 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3908 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3909 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3910 rc = jmp_rel(ctxt, ctxt->src.val);
3911
3912 return rc;
3913}
3914
3915static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3916{
3917 int rc = X86EMUL_CONTINUE;
3918
3919 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3920 rc = jmp_rel(ctxt, ctxt->src.val);
3921
3922 return rc;
3923}
3924
3925static int em_in(struct x86_emulate_ctxt *ctxt)
3926{
3927 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3928 &ctxt->dst.val))
3929 return X86EMUL_IO_NEEDED;
3930
3931 return X86EMUL_CONTINUE;
3932}
3933
3934static int em_out(struct x86_emulate_ctxt *ctxt)
3935{
3936 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3937 &ctxt->src.val, 1);
3938 /* Disable writeback. */
3939 ctxt->dst.type = OP_NONE;
3940 return X86EMUL_CONTINUE;
3941}
3942
3943static int em_cli(struct x86_emulate_ctxt *ctxt)
3944{
3945 if (emulator_bad_iopl(ctxt))
3946 return emulate_gp(ctxt, 0);
3947
3948 ctxt->eflags &= ~X86_EFLAGS_IF;
3949 return X86EMUL_CONTINUE;
3950}
3951
3952static int em_sti(struct x86_emulate_ctxt *ctxt)
3953{
3954 if (emulator_bad_iopl(ctxt))
3955 return emulate_gp(ctxt, 0);
3956
3957 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3958 ctxt->eflags |= X86_EFLAGS_IF;
3959 return X86EMUL_CONTINUE;
3960}
3961
3962static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3963{
3964 u32 eax, ebx, ecx, edx;
3965 u64 msr = 0;
3966
3967 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3968 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3969 ctxt->ops->cpl(ctxt)) {
3970 return emulate_gp(ctxt, 0);
3971 }
3972
3973 eax = reg_read(ctxt, VCPU_REGS_RAX);
3974 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3975 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3976 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3977 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3978 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3979 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3980 return X86EMUL_CONTINUE;
3981}
3982
3983static int em_sahf(struct x86_emulate_ctxt *ctxt)
3984{
3985 u32 flags;
3986
3987 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3988 X86_EFLAGS_SF;
3989 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3990
3991 ctxt->eflags &= ~0xffUL;
3992 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3993 return X86EMUL_CONTINUE;
3994}
3995
3996static int em_lahf(struct x86_emulate_ctxt *ctxt)
3997{
3998 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3999 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
4000 return X86EMUL_CONTINUE;
4001}
4002
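/*
 * BSWAP only ever needs the low 32 bits of the operand unless a REX.W
 * prefix selects the 64-bit form, which is why the 8-byte case below
 * is compiled in only on CONFIG_X86_64.
 */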
4003static int em_bswap(struct x86_emulate_ctxt *ctxt)
4004{
4005 switch (ctxt->op_bytes) {
4006#ifdef CONFIG_X86_64
4007 case 8:
4008 asm("bswap %0" : "+r"(ctxt->dst.val));
4009 break;
4010#endif
4011 default:
4012 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4013 break;
4014 }
4015 return X86EMUL_CONTINUE;
4016}
4017
4018static int em_clflush(struct x86_emulate_ctxt *ctxt)
4019{
4020 /* emulating clflush regardless of cpuid */
4021 return X86EMUL_CONTINUE;
4022}
4023
4024static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4025{
4026 ctxt->dst.val = (s32) ctxt->src.val;
4027 return X86EMUL_CONTINUE;
4028}
4029
4030static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4031{
4032 u32 eax = 1, ebx, ecx = 0, edx;
4033
4034 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4035 if (!(edx & FFL(FXSR)))
4036 return emulate_ud(ctxt);
4037
4038 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4039 return emulate_nm(ctxt);
4040
4041 /*
4042 * Don't emulate a case that should never be hit, instead of working
4043 * around a lack of fxsave64/fxrstor64 on old compilers.
4044 */
4045 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4046 return X86EMUL_UNHANDLEABLE;
4047
4048 return X86EMUL_CONTINUE;
4049}
4050
4051/*
4052 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
4053 * and restore MXCSR.
4054 */
4055static size_t __fxstate_size(int nregs)
4056{
4057 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4058}
4059
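/*
 * fxstate_size() computes how much of the FXSAVE image is meaningful:
 * all 16 XMM registers in 64-bit mode, 8 in 32-bit mode with
 * CR4.OSFXSR set, and only the legacy x87 area otherwise.
 */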
4060static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4061{
4062 bool cr4_osfxsr;
4063 if (ctxt->mode == X86EMUL_MODE_PROT64)
4064 return __fxstate_size(16);
4065
4066 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4067 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4068}
4069
4070/*
4071 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4072 * 1) 16 bit mode
4073 * 2) 32 bit mode
4074 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
4075 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4076 * save and restore
4077 * 3) 64-bit mode with REX.W prefix
4078 * - like (2), but XMM 8-15 are being saved and restored
4079 * 4) 64-bit mode without REX.W prefix
4080 * - like (3), but FIP and FDP are 64 bit
4081 *
4082 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4083 * desired result. (4) is not emulated.
4084 *
4085 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4086 * and FPU DS) should match.
4087 */
4088static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4089{
4090 struct fxregs_state fx_state;
4091 int rc;
4092
4093 rc = check_fxsr(ctxt);
4094 if (rc != X86EMUL_CONTINUE)
4095 return rc;
4096
4097 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4098
4099 if (rc != X86EMUL_CONTINUE)
4100 return rc;
4101
4102 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4103 fxstate_size(ctxt));
4104}
4105
4106/*
4107 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4108 * in the host registers (via FXSAVE) instead, so they won't be modified.
4109 * (preemption has to stay disabled until FXRSTOR).
4110 *
4111 * Use noinline to keep the stack for other functions called by callers small.
4112 */
4113static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4114 const size_t used_size)
4115{
4116 struct fxregs_state fx_tmp;
4117 int rc;
4118
4119 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4120 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4121 __fxstate_size(16) - used_size);
4122
4123 return rc;
4124}
4125
4126static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4127{
4128 struct fxregs_state fx_state;
4129 int rc;
4130 size_t size;
4131
4132 rc = check_fxsr(ctxt);
4133 if (rc != X86EMUL_CONTINUE)
4134 return rc;
4135
4136 size = fxstate_size(ctxt);
4137 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4138 if (rc != X86EMUL_CONTINUE)
4139 return rc;
4140
4141 if (size < __fxstate_size(16)) {
4142 rc = fxregs_fixup(&fx_state, size);
4143 if (rc != X86EMUL_CONTINUE)
4144 goto out;
4145 }
4146
4147 if (fx_state.mxcsr >> 16) {
4148 rc = emulate_gp(ctxt, 0);
4149 goto out;
4150 }
4151
4152 if (rc == X86EMUL_CONTINUE)
4153 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4154
4155 out:
4156 return rc;
4157}
4158
4159static bool valid_cr(int nr)
4160{
4161 switch (nr) {
4162 case 0:
4163 case 2 ... 4:
4164 case 8:
4165 return true;
4166 default:
4167 return false;
4168 }
4169}
4170
4171static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4172{
4173 if (!valid_cr(ctxt->modrm_reg))
4174 return emulate_ud(ctxt);
4175
4176 return X86EMUL_CONTINUE;
4177}
4178
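/*
 * Control-register writes are validated against mode-dependent reserved
 * bits before being forwarded: e.g. setting CR0.PG without CR0.PE, or
 * enabling long-mode paging without CR4.PAE, must raise #GP, and CR3
 * bits above MAXPHYADDR are reserved in long mode (modulo the PCID
 * no-flush bit when CR4.PCIDE is set).
 */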
4179static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4180{
4181 u64 new_val = ctxt->src.val64;
4182 int cr = ctxt->modrm_reg;
4183 u64 efer = 0;
4184
4185 static u64 cr_reserved_bits[] = {
4186 0xffffffff00000000ULL,
4187 0, 0, 0, /* CR3 checked later */
4188 CR4_RESERVED_BITS,
4189 0, 0, 0,
4190 CR8_RESERVED_BITS,
4191 };
4192
4193 if (!valid_cr(cr))
4194 return emulate_ud(ctxt);
4195
4196 if (new_val & cr_reserved_bits[cr])
4197 return emulate_gp(ctxt, 0);
4198
4199 switch (cr) {
4200 case 0: {
4201 u64 cr4;
4202 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4203 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4204 return emulate_gp(ctxt, 0);
4205
4206 cr4 = ctxt->ops->get_cr(ctxt, 4);
4207 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4208
4209 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4210 !(cr4 & X86_CR4_PAE))
4211 return emulate_gp(ctxt, 0);
4212
4213 break;
4214 }
4215 case 3: {
4216 u64 rsvd = 0;
4217
4218 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4219 if (efer & EFER_LMA) {
4220 u64 maxphyaddr;
4221 u32 eax, ebx, ecx, edx;
4222
4223 eax = 0x80000008;
4224 ecx = 0;
4225 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4226 &edx, false))
4227 maxphyaddr = eax & 0xff;
4228 else
4229 maxphyaddr = 36;
4230 rsvd = rsvd_bits(maxphyaddr, 63);
4231 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4232 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4233 }
4234
4235 if (new_val & rsvd)
4236 return emulate_gp(ctxt, 0);
4237
4238 break;
4239 }
4240 case 4: {
4241 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4242
4243 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4244 return emulate_gp(ctxt, 0);
4245
4246 break;
4247 }
4248 }
4249
4250 return X86EMUL_CONTINUE;
4251}
4252
4253static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4254{
4255 unsigned long dr7;
4256
4257 ctxt->ops->get_dr(ctxt, 7, &dr7);
4258
4259 /* Check if DR7.Global_Enable is set */
4260 return dr7 & (1 << 13);
4261}
4262
4263static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4264{
4265 int dr = ctxt->modrm_reg;
4266 u64 cr4;
4267
4268 if (dr > 7)
4269 return emulate_ud(ctxt);
4270
4271 cr4 = ctxt->ops->get_cr(ctxt, 4);
4272 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4273 return emulate_ud(ctxt);
4274
4275 if (check_dr7_gd(ctxt)) {
4276 ulong dr6;
4277
4278 ctxt->ops->get_dr(ctxt, 6, &dr6);
4279 dr6 &= ~DR_TRAP_BITS;
4280 dr6 |= DR6_BD | DR6_RTM;
4281 ctxt->ops->set_dr(ctxt, 6, dr6);
4282 return emulate_db(ctxt);
4283 }
4284
4285 return X86EMUL_CONTINUE;
4286}
4287
4288static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4289{
4290 u64 new_val = ctxt->src.val64;
4291 int dr = ctxt->modrm_reg;
4292
4293 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4294 return emulate_gp(ctxt, 0);
4295
4296 return check_dr_read(ctxt);
4297}
4298
4299static int check_svme(struct x86_emulate_ctxt *ctxt)
4300{
4301 u64 efer = 0;
4302
4303 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4304
4305 if (!(efer & EFER_SVME))
4306 return emulate_ud(ctxt);
4307
4308 return X86EMUL_CONTINUE;
4309}
4310
4311static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4312{
4313 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4314
4315 /* Valid physical address? */
4316 if (rax & 0xffff000000000000ULL)
4317 return emulate_gp(ctxt, 0);
4318
4319 return check_svme(ctxt);
4320}
4321
4322static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4323{
4324 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4325
4326 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4327 return emulate_ud(ctxt);
4328
4329 return X86EMUL_CONTINUE;
4330}
4331
4332static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4333{
4334 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4335 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4336
4337 /*
4338 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4339 * in Ring3 when CR4.PCE=0.
4340 */
4341 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4342 return X86EMUL_CONTINUE;
4343
4344 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4345 ctxt->ops->check_pmc(ctxt, rcx))
4346 return emulate_gp(ctxt, 0);
4347
4348 return X86EMUL_CONTINUE;
4349}
4350
4351static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4352{
4353 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4354 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4355 return emulate_gp(ctxt, 0);
4356
4357 return X86EMUL_CONTINUE;
4358}
4359
4360static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4361{
4362 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4363 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4364 return emulate_gp(ctxt, 0);
4365
4366 return X86EMUL_CONTINUE;
4367}
4368
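/*
 * The decode tables below are built from the shorthand macros that
 * follow: D/I/F declare plain, execute-callback and fastop entries,
 * G/GD/E/MD/ID redirect through group, group-dual, escape, mode-dual
 * and instruction-dual tables, and the D2bv/I2bv/F2bv/F6ALU forms
 * expand one definition into its byte-op and word-op encodings.
 */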
4369 #define D(_y) { .flags = (_y) }
4370#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4371#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4372 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4373 #define N D(NotImpl)
4374 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4375#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4376#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4377 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4378 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4379 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4380 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4381 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4382 #define II(_f, _e, _i) \
4383 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4384 #define IIP(_f, _e, _i, _p) \
4385 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4386 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4387 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4388
4389 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4390 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4391 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4392 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4393#define I2bvIP(_f, _e, _i, _p) \
4394 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4395
4396#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4397 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4398 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4399
4400static const struct opcode group7_rm0[] = {
4401 N,
4402 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4403 N, N, N, N, N, N,
4404};
4405
4406 static const struct opcode group7_rm1[] = {
4407 DI(SrcNone | Priv, monitor),
4408 DI(SrcNone | Priv, mwait),
4409 N, N, N, N, N, N,
4410};
4411
4412 static const struct opcode group7_rm3[] = {
4413 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4414 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4415 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4416 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4417 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4418 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4419 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4420 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4421 };
4422
4423 static const struct opcode group7_rm7[] = {
4424 N,
4425 DIP(SrcNone, rdtscp, check_rdtsc),
4426 N, N, N, N, N, N,
4427};
4428
4429 static const struct opcode group1[] = {
4430 F(Lock, em_add),
4431 F(Lock | PageTable, em_or),
4432 F(Lock, em_adc),
4433 F(Lock, em_sbb),
4434 F(Lock | PageTable, em_and),
4435 F(Lock, em_sub),
4436 F(Lock, em_xor),
4437 F(NoWrite, em_cmp),
4438};
4439
4440 static const struct opcode group1A[] = {
4441 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4442};
4443
4444static const struct opcode group2[] = {
4445 F(DstMem | ModRM, em_rol),
4446 F(DstMem | ModRM, em_ror),
4447 F(DstMem | ModRM, em_rcl),
4448 F(DstMem | ModRM, em_rcr),
4449 F(DstMem | ModRM, em_shl),
4450 F(DstMem | ModRM, em_shr),
4451 F(DstMem | ModRM, em_shl),
4452 F(DstMem | ModRM, em_sar),
4453};
4454
4455 static const struct opcode group3[] = {
4456 F(DstMem | SrcImm | NoWrite, em_test),
4457 F(DstMem | SrcImm | NoWrite, em_test),
4458 F(DstMem | SrcNone | Lock, em_not),
4459 F(DstMem | SrcNone | Lock, em_neg),
4460 F(DstXacc | Src2Mem, em_mul_ex),
4461 F(DstXacc | Src2Mem, em_imul_ex),
4462 F(DstXacc | Src2Mem, em_div_ex),
4463 F(DstXacc | Src2Mem, em_idiv_ex),
4464};
4465
4466 static const struct opcode group4[] = {
4467 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4468 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4469 N, N, N, N, N, N,
4470};
4471
4472 static const struct opcode group5[] = {
4473 F(DstMem | SrcNone | Lock, em_inc),
4474 F(DstMem | SrcNone | Lock, em_dec),
4475 I(SrcMem | NearBranch, em_call_near_abs),
4476 I(SrcMemFAddr | ImplicitOps, em_call_far),
4477 I(SrcMem | NearBranch, em_jmp_abs),
4478 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4479 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4480};
4481
4482 static const struct opcode group6[] = {
4483 II(Prot | DstMem, em_sldt, sldt),
4484 II(Prot | DstMem, em_str, str),
4485 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4486 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4487 N, N, N, N,
4488};
4489
4490 static const struct group_dual group7 = { {
4491 II(Mov | DstMem, em_sgdt, sgdt),
4492 II(Mov | DstMem, em_sidt, sidt),
4493 II(SrcMem | Priv, em_lgdt, lgdt),
4494 II(SrcMem | Priv, em_lidt, lidt),
4495 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4496 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4497 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4498 }, {
4499 EXT(0, group7_rm0),
4500 EXT(0, group7_rm1),
4501 N, EXT(0, group7_rm3),
4502 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4503 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4504 EXT(0, group7_rm7),
4505} };
4506
4507 static const struct opcode group8[] = {
4508 N, N, N, N,
4509 F(DstMem | SrcImmByte | NoWrite, em_bt),
4510 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4511 F(DstMem | SrcImmByte | Lock, em_btr),
4512 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4513};
4514
4515/*
4516 * The "memory" destination is actually always a register, since we come
4517 * from the register case of group9.
4518 */
4519static const struct gprefix pfx_0f_c7_7 = {
4520 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4521};
4522
4523
4524 static const struct group_dual group9 = { {
4525 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4526 }, {
4527 N, N, N, N, N, N, N,
4528 GP(0, &pfx_0f_c7_7),
4529} };
4530
4531 static const struct opcode group11[] = {
4532 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4533 X7(D(Undefined)),
4534};
4535
4536 static const struct gprefix pfx_0f_ae_7 = {
4537 I(SrcMem | ByteOp, em_clflush), N, N, N,
4538};
4539
4540static const struct group_dual group15 = { {
4541 I(ModRM | Aligned16, em_fxsave),
4542 I(ModRM | Aligned16, em_fxrstor),
4543 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4544}, {
4545 N, N, N, N, N, N, N, N,
4546} };
4547
fd0a0d82 4548static const struct gprefix pfx_0f_6f_0f_7f = {
e5971755 4549 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
aa97bb48
AK
4550};
4551
39f062ff
NA
4552static const struct instr_dual instr_dual_0f_2b = {
4553 I(0, em_mov), N
4554};
4555
d5b77069 4556static const struct gprefix pfx_0f_2b = {
39f062ff 4557 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
3e114eb4
AK
4558};
4559
29916968
SF
4560static const struct gprefix pfx_0f_10_0f_11 = {
4561 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4562};
4563
27ce8258 4564static const struct gprefix pfx_0f_28_0f_29 = {
6fec27d8 4565 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
27ce8258
IM
4566};
4567
0a37027e
AW
4568static const struct gprefix pfx_0f_e7 = {
4569 N, I(Sse, em_mov), N, N,
4570};
4571
045a282c 4572static const struct escape escape_d9 = { {
16bebefe 4573 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
045a282c
GN
4574}, {
4575 /* 0xC0 - 0xC7 */
4576 N, N, N, N, N, N, N, N,
4577 /* 0xC8 - 0xCF */
4578 N, N, N, N, N, N, N, N,
4579 /* 0xD0 - 0xD7 */
4580 N, N, N, N, N, N, N, N,
4581 /* 0xD8 - 0xDF */
4582 N, N, N, N, N, N, N, N,
4583 /* 0xE0 - 0xE7 */
4584 N, N, N, N, N, N, N, N,
4585 /* 0xE8 - 0xEF */
4586 N, N, N, N, N, N, N, N,
4587 /* 0xF0 - 0xF7 */
4588 N, N, N, N, N, N, N, N,
4589 /* 0xF8 - 0xFF */
4590 N, N, N, N, N, N, N, N,
4591} };
4592
4593static const struct escape escape_db = { {
4594 N, N, N, N, N, N, N, N,
4595}, {
4596 /* 0xC0 - 0xC7 */
4597 N, N, N, N, N, N, N, N,
4598 /* 0xC8 - 0xCF */
4599 N, N, N, N, N, N, N, N,
4600 /* 0xD0 - 0xD7 */
4601 N, N, N, N, N, N, N, N,
4602 /* 0xD8 - 0xDF */
4603 N, N, N, N, N, N, N, N,
4604 /* 0xE0 - 0xE7 */
4605 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4606 /* 0xE8 - 0xEF */
4607 N, N, N, N, N, N, N, N,
4608 /* 0xF0 - 0xF7 */
4609 N, N, N, N, N, N, N, N,
4610 /* 0xF8 - 0xFF */
4611 N, N, N, N, N, N, N, N,
4612} };
4613
4614static const struct escape escape_dd = { {
4615 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4616}, {
4617 /* 0xC0 - 0xC7 */
4618 N, N, N, N, N, N, N, N,
4619 /* 0xC8 - 0xCF */
4620 N, N, N, N, N, N, N, N,
4621 /* 0xD0 - 0xD7 */
4622 N, N, N, N, N, N, N, N,
4623 /* 0xD8 - 0xDF */
4624 N, N, N, N, N, N, N, N,
4625 /* 0xE0 - 0xE7 */
4626 N, N, N, N, N, N, N, N,
4627 /* 0xE8 - 0xEF */
4628 N, N, N, N, N, N, N, N,
4629 /* 0xF0 - 0xF7 */
4630 N, N, N, N, N, N, N, N,
4631 /* 0xF8 - 0xFF */
4632 N, N, N, N, N, N, N, N,
4633} };
4634
4635static const struct instr_dual instr_dual_0f_c3 = {
4636 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4637};
4638
2276b511
NA
4639static const struct mode_dual mode_dual_63 = {
4640 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4641};
4642
fd0a0d82 4643static const struct opcode opcode_table[256] = {
73fba5f4 4644 /* 0x00 - 0x07 */
fb864fbc 4645 F6ALU(Lock, em_add),
1cd196ea
AK
4646 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4647 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
73fba5f4 4648 /* 0x08 - 0x0F */
fb864fbc 4649 F6ALU(Lock | PageTable, em_or),
1cd196ea
AK
4650 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4651 N,
73fba5f4 4652 /* 0x10 - 0x17 */
fb864fbc 4653 F6ALU(Lock, em_adc),
1cd196ea
AK
4654 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4655 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
73fba5f4 4656 /* 0x18 - 0x1F */
fb864fbc 4657 F6ALU(Lock, em_sbb),
1cd196ea
AK
4658 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4659 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
73fba5f4 4660 /* 0x20 - 0x27 */
fb864fbc 4661 F6ALU(Lock | PageTable, em_and), N, N,
73fba5f4 4662 /* 0x28 - 0x2F */
fb864fbc 4663 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
73fba5f4 4664 /* 0x30 - 0x37 */
fb864fbc 4665 F6ALU(Lock, em_xor), N, N,
73fba5f4 4666 /* 0x38 - 0x3F */
fb864fbc 4667 F6ALU(NoWrite, em_cmp), N, N,
73fba5f4 4668 /* 0x40 - 0x4F */
95413dc4 4669 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
73fba5f4 4670 /* 0x50 - 0x57 */
63540382 4671 X8(I(SrcReg | Stack, em_push)),
73fba5f4 4672 /* 0x58 - 0x5F */
c54fe504 4673 X8(I(DstReg | Stack, em_pop)),
73fba5f4 4674 /* 0x60 - 0x67 */
b96a7fad
TY
4675 I(ImplicitOps | Stack | No64, em_pusha),
4676 I(ImplicitOps | Stack | No64, em_popa),
2276b511 4677 N, MD(ModRM, &mode_dual_63),
73fba5f4
AK
4678 N, N, N, N,
4679 /* 0x68 - 0x6F */
d46164db
AK
4680 I(SrcImm | Mov | Stack, em_push),
4681 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
f3a1b9f4
AK
4682 I(SrcImmByte | Mov | Stack, em_push),
4683 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
b3356bf0 4684 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
2b5e97e1 4685 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
73fba5f4 4686 /* 0x70 - 0x7F */
58b7075d 4687 X16(D(SrcImmByte | NearBranch)),
73fba5f4 4688 /* 0x80 - 0x87 */
1c2545be
TY
4689 G(ByteOp | DstMem | SrcImm, group1),
4690 G(DstMem | SrcImm, group1),
4691 G(ByteOp | DstMem | SrcImm | No64, group1),
4692 G(DstMem | SrcImmByte, group1),
fb864fbc 4693 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
d5ae7ce8 4694 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
73fba5f4 4695 /* 0x88 - 0x8F */
d5ae7ce8 4696 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
b9eac5f4 4697 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
d5ae7ce8 4698 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
1bd5f469
TY
4699 D(ModRM | SrcMem | NoAccess | DstReg),
4700 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4701 G(0, group1A),
73fba5f4 4702 /* 0x90 - 0x97 */
bf608f88 4703 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
73fba5f4 4704 /* 0x98 - 0x9F */
61429142 4705 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
cc4feed5 4706 I(SrcImmFAddr | No64, em_call_far), N,
62aaa2f0 4707 II(ImplicitOps | Stack, em_pushf, pushf),
98f73630
PB
4708 II(ImplicitOps | Stack, em_popf, popf),
4709 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
73fba5f4 4710 /* 0xA0 - 0xA7 */
b9eac5f4 4711 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
d5ae7ce8 4712 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
0f89b207
TL
4713 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4714 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
73fba5f4 4715 /* 0xA8 - 0xAF */
fb864fbc 4716 F2bv(DstAcc | SrcImm | NoWrite, em_test),
b9eac5f4
AK
4717 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4718 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
5aca3722 4719 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
73fba5f4 4720 /* 0xB0 - 0xB7 */
b9eac5f4 4721 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
73fba5f4 4722 /* 0xB8 - 0xBF */
5e2c6883 4723 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
73fba5f4 4724 /* 0xC0 - 0xC7 */
007a3b54 4725 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
58b7075d
NA
4726 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4727 I(ImplicitOps | NearBranch, em_ret),
d4b4325f
AK
4728 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4729 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
a4d4a7c1 4730 G(ByteOp, group11), G(0, group11),
73fba5f4 4731 /* 0xC8 - 0xCF */
612e89f0 4732 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
16794aaa
NA
4733 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4734 I(ImplicitOps, em_ret_far),
3c6e276f 4735 D(ImplicitOps), DI(SrcImmByte, intn),
db5b0762 4736 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
73fba5f4 4737 /* 0xD0 - 0xD7 */
007a3b54
AK
4738 G(Src2One | ByteOp, group2), G(Src2One, group2),
4739 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
a035d5c6 4740 I(DstAcc | SrcImmUByte | No64, em_aam),
326f578f
PB
4741 I(DstAcc | SrcImmUByte | No64, em_aad),
4742 F(DstAcc | ByteOp | No64, em_salc),
7fa57952 4743 I(DstAcc | SrcXLat | ByteOp, em_mov),
73fba5f4 4744 /* 0xD8 - 0xDF */
045a282c 4745 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
73fba5f4 4746 /* 0xE0 - 0xE7 */
58b7075d
NA
4747 X3(I(SrcImmByte | NearBranch, em_loop)),
4748 I(SrcImmByte | NearBranch, em_jcxz),
d7841a4b
TY
4749 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4750 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
73fba5f4 4751 /* 0xE8 - 0xEF */
58b7075d
NA
4752 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4753 I(SrcImmFAddr | No64, em_jmp_far),
4754 D(SrcImmByte | ImplicitOps | NearBranch),
d7841a4b
TY
4755 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4756 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
73fba5f4 4757 /* 0xF0 - 0xF7 */
bf608f88 4758 N, DI(ImplicitOps, icebp), N, N,
3c6e276f
AK
4759 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4760 G(ByteOp, group3), G(0, group3),
73fba5f4 4761 /* 0xF8 - 0xFF */
f411e6cd
TY
4762 D(ImplicitOps), D(ImplicitOps),
4763 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
73fba5f4
AK
4764 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4765};
4766
fd0a0d82 4767static const struct opcode twobyte_table[256] = {
73fba5f4 4768 /* 0x00 - 0x0F */
dee6bb70 4769 G(0, group6), GD(0, &group7), N, N,
b51e974f 4770 N, I(ImplicitOps | EmulateOnUD, em_syscall),
db5b0762 4771 II(ImplicitOps | Priv, em_clts, clts), N,
3c6e276f 4772 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3f6f1480 4773 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
73fba5f4 4774 /* 0x10 - 0x1F */
29916968
SF
4775 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4776 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4777 N, N, N, N, N, N,
4778 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4779 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4780 /* 0x20 - 0x2F */
4781 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4782 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4783 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4784 check_cr_write),
4785 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4786 check_dr_write),
4787 N, N, N, N,
4788 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4789 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4790 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4791 N, N, N, N,
4792 /* 0x30 - 0x3F */
4793 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4794 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4795 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4796 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4797 I(ImplicitOps | EmulateOnUD, em_sysenter),
4798 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4799 N, N,
4800 N, N, N, N, N, N, N, N,
4801 /* 0x40 - 0x4F */
4802 X16(D(DstReg | SrcMem | ModRM)),
4803 /* 0x50 - 0x5F */
4804 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4805 /* 0x60 - 0x6F */
4806 N, N, N, N,
4807 N, N, N, N,
4808 N, N, N, N,
4809 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4810 /* 0x70 - 0x7F */
4811 N, N, N, N,
4812 N, N, N, N,
4813 N, N, N, N,
4814 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4815 /* 0x80 - 0x8F */
4816 X16(D(SrcImm | NearBranch)),
4817 /* 0x90 - 0x9F */
4818 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4819 /* 0xA0 - 0xA7 */
4820 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4821 II(ImplicitOps, em_cpuid, cpuid),
4822 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4823 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4824 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4825 /* 0xA8 - 0xAF */
4826 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4827 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4828 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4829 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4830 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4831 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4832 /* 0xB0 - 0xB7 */
4833 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4834 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4835 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4836 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4837 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4838 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4839 /* 0xB8 - 0xBF */
4840 N, N,
4841 G(BitOp, group8),
4842 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4843 I(DstReg | SrcMem | ModRM, em_bsf_c),
4844 I(DstReg | SrcMem | ModRM, em_bsr_c),
4845 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4846 /* 0xC0 - 0xC7 */
4847 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4848 N, ID(0, &instr_dual_0f_c3),
4849 N, N, N, GD(0, &group9),
4850 /* 0xC8 - 0xCF */
4851 X8(I(DstReg, em_bswap)),
4852 /* 0xD0 - 0xDF */
4853 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4854 /* 0xE0 - 0xEF */
4855 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4856 N, N, N, N, N, N, N, N,
4857 /* 0xF0 - 0xFF */
4858 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4859};
4860
4861static const struct instr_dual instr_dual_0f_38_f0 = {
4862 I(DstReg | SrcMem | Mov, em_movbe), N
4863};
4864
4865static const struct instr_dual instr_dual_0f_38_f1 = {
4866 I(DstMem | SrcReg | Mov, em_movbe), N
4867};
4868
4869 static const struct gprefix three_byte_0f_38_f0 = {
4870 ID(0, &instr_dual_0f_38_f0), N, N, N
4871};
4872
4873static const struct gprefix three_byte_0f_38_f1 = {
39f062ff 4874 ID(0, &instr_dual_0f_38_f1), N, N, N
0bc5eedb
BP
4875};
4876
4877/*
4878 * Insns below are selected by the prefix, which is indexed by the
4879 * third opcode byte.
4880 */
4881static const struct opcode opcode_map_0f_38[256] = {
4882 /* 0x00 - 0x7f */
4883 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4884 /* 0x80 - 0xef */
4885 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4886 /* 0xf0 - 0xf1 */
4887 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4888 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4889 /* 0xf2 - 0xff */
4890 N, N, X4(N), X8(N)
4891};
4892
4893#undef D
4894#undef N
4895#undef G
4896#undef GD
4897#undef I
4898 #undef GP
4899 #undef EXT
4900 #undef MD
4901 #undef ID
4902
4903 #undef D2bv
4904 #undef D2bvIP
4905 #undef I2bv
4906 #undef I2bvIP
4907 #undef I6ALU
4908
4909 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4910{
4911 unsigned size;
4912
4913 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4914 if (size == 8)
4915 size = 4;
4916 return size;
4917}
4918
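/*
 * Immediates are fetched sign-extended from the instruction stream;
 * for zero-extended forms (OpImmU, OpImmU16, ...) the high bits are
 * masked off again in the !sign_extension path below.
 */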
4919static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4920 unsigned size, bool sign_extension)
4921{
4922 int rc = X86EMUL_CONTINUE;
4923
4924 op->type = OP_IMM;
4925 op->bytes = size;
4926 op->addr.mem.ea = ctxt->_eip;
4927 /* NB. Immediates are sign-extended as necessary. */
4928 switch (op->bytes) {
4929 case 1:
4930 op->val = insn_fetch(s8, ctxt);
4931 break;
4932 case 2:
4933 op->val = insn_fetch(s16, ctxt);
4934 break;
4935 case 4:
4936 op->val = insn_fetch(s32, ctxt);
4937 break;
4938 case 8:
4939 op->val = insn_fetch(s64, ctxt);
4940 break;
4941 }
4942 if (!sign_extension) {
4943 switch (op->bytes) {
4944 case 1:
4945 op->val &= 0xff;
4946 break;
4947 case 2:
4948 op->val &= 0xffff;
4949 break;
4950 case 4:
4951 op->val &= 0xffffffff;
4952 break;
4953 }
4954 }
4955done:
4956 return rc;
4957}
4958
4959static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4960 unsigned d)
4961{
4962 int rc = X86EMUL_CONTINUE;
4963
4964 switch (d) {
4965 case OpReg:
4966 decode_register_operand(ctxt, op);
4967 break;
4968 case OpImmUByte:
4969 rc = decode_imm(ctxt, op, 1, false);
4970 break;
4971 case OpMem:
4972 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4973 mem_common:
4974 *op = ctxt->memop;
4975 ctxt->memopp = op;
4976 if (ctxt->d & BitOp)
4977 fetch_bit_operand(ctxt);
4978 op->orig_val = op->val;
4979 break;
4980 case OpMem64:
4981 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4982 goto mem_common;
4983 case OpAcc:
4984 op->type = OP_REG;
4985 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4986 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4987 fetch_register_operand(op);
4988 op->orig_val = op->val;
4989 break;
4990 case OpAccLo:
4991 op->type = OP_REG;
4992 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4993 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4994 fetch_register_operand(op);
4995 op->orig_val = op->val;
4996 break;
4997 case OpAccHi:
4998 if (ctxt->d & ByteOp) {
4999 op->type = OP_NONE;
5000 break;
5001 }
5002 op->type = OP_REG;
5003 op->bytes = ctxt->op_bytes;
5004 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5005 fetch_register_operand(op);
5006 op->orig_val = op->val;
5007 break;
5008 case OpDI:
5009 op->type = OP_MEM;
5010 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5011 op->addr.mem.ea =
5012 register_address(ctxt, VCPU_REGS_RDI);
5013 op->addr.mem.seg = VCPU_SREG_ES;
5014 op->val = 0;
5015 op->count = 1;
5016 break;
5017 case OpDX:
5018 op->type = OP_REG;
5019 op->bytes = 2;
dd856efa 5020 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
a9945549
AK
5021 fetch_register_operand(op);
5022 break;
4dd6a57d 5023 case OpCL:
d29b9d7e 5024 op->type = OP_IMM;
4dd6a57d 5025 op->bytes = 1;
dd856efa 5026 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4dd6a57d
AK
5027 break;
5028 case OpImmByte:
5029 rc = decode_imm(ctxt, op, 1, true);
5030 break;
5031 case OpOne:
d29b9d7e 5032 op->type = OP_IMM;
4dd6a57d
AK
5033 op->bytes = 1;
5034 op->val = 1;
5035 break;
5036 case OpImm:
5037 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5038 break;
5e2c6883
NA
5039 case OpImm64:
5040 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5041 break;
28867cee
AK
5042 case OpMem8:
5043 ctxt->memop.bytes = 1;
660696d1 5044 if (ctxt->memop.type == OP_REG) {
aa9ac1a6
GN
5045 ctxt->memop.addr.reg = decode_register(ctxt,
5046 ctxt->modrm_rm, true);
660696d1
GN
5047 fetch_register_operand(&ctxt->memop);
5048 }
28867cee 5049 goto mem_common;
0fe59128
AK
5050 case OpMem16:
5051 ctxt->memop.bytes = 2;
5052 goto mem_common;
5053 case OpMem32:
5054 ctxt->memop.bytes = 4;
5055 goto mem_common;
5056 case OpImmU16:
5057 rc = decode_imm(ctxt, op, 2, false);
5058 break;
5059 case OpImmU:
5060 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5061 break;
5062 case OpSI:
5063 op->type = OP_MEM;
5064 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5065 op->addr.mem.ea =
01485a22 5066 register_address(ctxt, VCPU_REGS_RSI);
573e80fe 5067 op->addr.mem.seg = ctxt->seg_override;
0fe59128 5068 op->val = 0;
b3356bf0 5069 op->count = 1;
0fe59128 5070 break;
7fa57952
PB
5071 case OpXLat:
5072 op->type = OP_MEM;
5073 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5074 op->addr.mem.ea =
01485a22 5075 address_mask(ctxt,
7fa57952
PB
5076 reg_read(ctxt, VCPU_REGS_RBX) +
5077 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
573e80fe 5078 op->addr.mem.seg = ctxt->seg_override;
7fa57952
PB
5079 op->val = 0;
5080 break;
0fe59128
AK
5081 case OpImmFAddr:
5082 op->type = OP_IMM;
5083 op->addr.mem.ea = ctxt->_eip;
5084 op->bytes = ctxt->op_bytes + 2;
5085 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5086 break;
5087 case OpMemFAddr:
5088 ctxt->memop.bytes = ctxt->op_bytes + 2;
5089 goto mem_common;
c191a7a0 5090 case OpES:
d29b9d7e 5091 op->type = OP_IMM;
c191a7a0
AK
5092 op->val = VCPU_SREG_ES;
5093 break;
5094 case OpCS:
d29b9d7e 5095 op->type = OP_IMM;
c191a7a0
AK
5096 op->val = VCPU_SREG_CS;
5097 break;
5098 case OpSS:
d29b9d7e 5099 op->type = OP_IMM;
c191a7a0
AK
5100 op->val = VCPU_SREG_SS;
5101 break;
5102 case OpDS:
d29b9d7e 5103 op->type = OP_IMM;
c191a7a0
AK
5104 op->val = VCPU_SREG_DS;
5105 break;
5106 case OpFS:
d29b9d7e 5107 op->type = OP_IMM;
c191a7a0
AK
5108 op->val = VCPU_SREG_FS;
5109 break;
5110 case OpGS:
d29b9d7e 5111 op->type = OP_IMM;
c191a7a0
AK
5112 op->val = VCPU_SREG_GS;
5113 break;
a9945549
AK
5114 case OpImplicit:
5115 /* Special instructions do their own operand decoding. */
5116 default:
5117 op->type = OP_NONE; /* Disable writeback. */
5118 break;
5119 }
5120
5121done:
5122 return rc;
5123}
5124
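/*
 * Decode one instruction: consume legacy and REX prefixes, walk the
 * (possibly multi-level) opcode tables, then decode ModRM/SIB and the
 * source, source2 and destination operands.  Execution happens
 * separately in x86_emulate_insn().
 */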
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

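	/*
	 * Resolve multi-level table entries: group (ModRM.reg indexed),
	 * group-dual (register vs. memory forms), SIMD-prefix, x87
	 * escape, instruction-dual and mode-dual entries all redirect
	 * to another struct opcode whose flags are merged into ctxt->d.
	 */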
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
		      No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE/REPZ
	 * and REPNE/REPNZ, i.e. to CMPS (0xa6/0xa7) and SCAS
	 * (0xae/0xaf).  If such a prefix is present, test the
	 * corresponding condition:
	 * - if REPE/REPZ and ZF = 0 then done
	 * - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

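/*
 * fwait faults if an unmasked x87 exception is pending, so this forces
 * delivery of any such exception as #MF before MMX state is touched.
 */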
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = asm_safe("fwait");

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

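/*
 * Dispatch to a fastop stub: the stubs for the 1/2/4/8-byte variants
 * of an instruction are laid out FASTOP_SIZE bytes apart, so the
 * operand size selects the entry point.  Guest EFLAGS are loaded
 * around the call and the resulting flags copied back; a stub that
 * raises #DE signals it by returning a NULL fop.
 */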
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

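/*
 * Zero the decode-cache portion of the context: everything from
 * rip_relative up to (but not including) modrm is cleared in a single
 * memset, relying on the field layout of struct x86_emulate_ctxt.
 */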
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

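/*
 * Execute a previously decoded instruction: run permission and
 * intercept checks, fetch memory operands, dispatch to the ->execute
 * or fastop handler (or the opcode switches below for the remaining
 * cases), then write back results and advance RIP.
 */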
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;


	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

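/*
 * special_insn: reached once any memory operands have been fetched;
 * dispatches to the ->execute/fastop handler when one exists,
 * otherwise falls through to the opcode switches below.
 */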
special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

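	/*
	 * String instructions are emulated one iteration at a time:
	 * RCX is decremented here and, unless the instruction has
	 * completed, the whole decode/execute cycle is restarted for
	 * the next iteration.
	 */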
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

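/*
 * Two-byte (0x0f-prefixed) opcodes without a dedicated ->execute
 * handler are emulated inline below.
 */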
twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							 (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

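/*
 * A cached guest physical address is only reusable when the
 * instruction accesses a single, fixed memory location; rep-string
 * and two-memory-operand instructions do not qualify.
 */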
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}