KVM: x86 emulator: drop parentheses in repeat macros
arch/x86/kvm/emulate.c
1/******************************************************************************
2 * emulate.c
3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 *
6 * Copyright (c) 2005 Keir Fraser
7 *
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
10 *
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 *
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 *
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21 */
22
23#ifndef __KERNEL__
24#include <stdio.h>
25#include <stdint.h>
26#include <public/xen.h>
27#define DPRINTF(_f, _a ...) printf(_f , ## _a)
28#else
29#include <linux/kvm_host.h>
30#include "kvm_cache_regs.h"
31#define DPRINTF(x...) do {} while (0)
32#endif
33#include <linux/module.h>
34#include <asm/kvm_emulate.h>
35
36#include "x86.h"
37#include "tss.h"
38
39/*
40 * Opcode effective-address decode tables.
41 * Note that we only emulate instructions that have at least one memory
42 * operand (excluding implicit stack references). We assume that stack
43 * references and instruction fetches will never occur in special memory
44 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
45 * not be handled.
46 */
47
48/* Operand sizes: 8-bit operands or specified/overridden size. */
49#define ByteOp (1<<16) /* 8-bit operands. */
50/* Destination operand type. */
51#define ImplicitOps (1<<17) /* Implicit in opcode. No generic decode. */
52#define DstReg (2<<17) /* Register operand. */
53#define DstMem (3<<17) /* Memory operand. */
54#define DstAcc (4<<17) /* Destination Accumulator */
55#define DstDI (5<<17) /* Destination is in ES:(E)DI */
56#define DstMem64 (6<<17) /* 64bit memory operand */
57#define DstMask (7<<17)
58/* Source operand type. */
59#define SrcNone (0<<4) /* No source operand. */
60#define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
61#define SrcReg (1<<4) /* Register operand. */
62#define SrcMem (2<<4) /* Memory operand. */
63#define SrcMem16 (3<<4) /* Memory operand (16-bit). */
64#define SrcMem32 (4<<4) /* Memory operand (32-bit). */
65#define SrcImm (5<<4) /* Immediate operand. */
66#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
67#define SrcOne (7<<4) /* Implied '1' */
68#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
69#define SrcImmU (9<<4) /* Immediate operand, unsigned */
70#define SrcSI (0xa<<4) /* Source is in the DS:RSI */
71#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
72#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
73#define SrcAcc (0xd<<4) /* Source Accumulator */
74#define SrcMask (0xf<<4)
75/* Generic ModRM decode. */
76#define ModRM (1<<8)
77/* Destination is only written; never read. */
78#define Mov (1<<9)
79#define BitOp (1<<10)
80#define MemAbs (1<<11) /* Memory operand is absolute displacement */
81#define String (1<<12) /* String instruction (rep capable) */
82#define Stack (1<<13) /* Stack instruction (push/pop) */
83#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
84#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
85#define GroupMask 0x0f /* Group number stored in bits 0:3 */
86/* Misc flags */
87#define Undefined (1<<25) /* No Such Instruction */
88#define Lock (1<<26) /* lock prefix is allowed for the instruction */
89#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
90#define No64 (1<<28)
91/* Source 2 operand type */
92#define Src2None (0<<29)
93#define Src2CL (1<<29)
94#define Src2ImmByte (2<<29)
95#define Src2One (3<<29)
96#define Src2Mask (7<<29)
97
98#define X2(x) x, x
99#define X3(x) X2(x), x
100#define X4(x) X2(x), X2(x)
101#define X5(x) X4(x), x
102#define X6(x) X4(x), X2(x)
103#define X7(x) X4(x), X3(x)
104#define X8(x) X4(x), X4(x)
105#define X16(x) X8(x), X8(x)
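/*
 * The X<n>() repeat macros above expand their argument n times, letting the
 * big opcode tables below stay compact: X16(SrcImmByte), for instance,
 * becomes sixteen comma-separated copies of SrcImmByte (the whole Jcc row).
 * Note the argument is pasted verbatim, with no added parentheses, so any
 * flag-expression initializer fragment can be passed through unchanged.
 */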
106
107enum {
108 Group1, Group1A, Group3, Group4, Group5, Group7, Group8, Group9,
109};
110
111static u32 opcode_table[256] = {
112 /* 0x00 - 0x07 */
113 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
114 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
115 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
116 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
117 /* 0x08 - 0x0F */
118 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
119 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
120 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
121 ImplicitOps | Stack | No64, 0,
122 /* 0x10 - 0x17 */
123 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
124 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
125 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
126 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
127 /* 0x18 - 0x1F */
128 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
129 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
130 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
131 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
132 /* 0x20 - 0x27 */
133 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
134 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
135 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
136 /* 0x28 - 0x2F */
137 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
138 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
139 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
140 /* 0x30 - 0x37 */
141 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
142 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
143 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
144 /* 0x38 - 0x3F */
145 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
146 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
147 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
148 0, 0,
149 /* 0x40 - 0x4F */
150 X16(DstReg),
151 /* 0x50 - 0x57 */
152 X8(SrcReg | Stack),
153 /* 0x58 - 0x5F */
154 X8(DstReg | Stack),
155 /* 0x60 - 0x67 */
156 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
157 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
158 0, 0, 0, 0,
159 /* 0x68 - 0x6F */
160 SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
161 DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
162 SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
163 /* 0x70 - 0x7F */
164 X16(SrcImmByte),
165 /* 0x80 - 0x87 */
166 ByteOp | DstMem | SrcImm | ModRM | Group | Group1,
167 DstMem | SrcImm | ModRM | Group | Group1,
168 ByteOp | DstMem | SrcImm | ModRM | No64 | Group | Group1,
169 DstMem | SrcImmByte | ModRM | Group | Group1,
170 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
171 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
172 /* 0x88 - 0x8F */
173 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
174 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
175 DstMem | SrcNone | ModRM | Mov, ModRM | DstReg,
176 ImplicitOps | SrcMem16 | ModRM, Group | Group1A,
177 /* 0x90 - 0x97 */
178 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
179 /* 0x98 - 0x9F */
180 0, 0, SrcImmFAddr | No64, 0,
181 ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
182 /* 0xA0 - 0xA7 */
183 ByteOp | DstAcc | SrcMem | Mov | MemAbs, DstAcc | SrcMem | Mov | MemAbs,
184 ByteOp | DstMem | SrcAcc | Mov | MemAbs, DstMem | SrcAcc | Mov | MemAbs,
185 ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
186 ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
187 /* 0xA8 - 0xAF */
188 DstAcc | SrcImmByte | ByteOp, DstAcc | SrcImm, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
189 ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
190 ByteOp | DstDI | String, DstDI | String,
191 /* 0xB0 - 0xB7 */
192 X8(ByteOp | DstReg | SrcImm | Mov),
193 /* 0xB8 - 0xBF */
194 X8(DstReg | SrcImm | Mov),
195 /* 0xC0 - 0xC7 */
196 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
197 0, ImplicitOps | Stack, 0, 0,
198 ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
199 /* 0xC8 - 0xCF */
200 0, 0, 0, ImplicitOps | Stack,
201 ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
202 /* 0xD0 - 0xD7 */
203 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
204 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
205 0, 0, 0, 0,
206 /* 0xD8 - 0xDF */
207 0, 0, 0, 0, 0, 0, 0, 0,
208 /* 0xE0 - 0xE7 */
209 0, 0, 0, 0,
210 ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
211 ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
212 /* 0xE8 - 0xEF */
213 SrcImm | Stack, SrcImm | ImplicitOps,
214 SrcImmFAddr | No64, SrcImmByte | ImplicitOps,
215 SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
216 SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
217 /* 0xF0 - 0xF7 */
218 0, 0, 0, 0,
219 ImplicitOps | Priv, ImplicitOps, ByteOp | Group | Group3, Group | Group3,
220 /* 0xF8 - 0xFF */
221 ImplicitOps, 0, ImplicitOps, ImplicitOps,
222 ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
223};
224
225static u32 twobyte_table[256] = {
226 /* 0x00 - 0x0F */
227 0, Group | GroupDual | Group7, 0, 0,
228 0, ImplicitOps, ImplicitOps | Priv, 0,
229 ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
230 0, ImplicitOps | ModRM, 0, 0,
231 /* 0x10 - 0x1F */
232 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
233 /* 0x20 - 0x2F */
234 ModRM | ImplicitOps | Priv, ModRM | Priv,
235 ModRM | ImplicitOps | Priv, ModRM | Priv,
236 0, 0, 0, 0,
237 0, 0, 0, 0, 0, 0, 0, 0,
238 /* 0x30 - 0x3F */
239 ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
240 ImplicitOps, ImplicitOps | Priv, 0, 0,
241 0, 0, 0, 0, 0, 0, 0, 0,
242 /* 0x40 - 0x4F */
243 X16(DstReg | SrcMem | ModRM | Mov),
244 /* 0x50 - 0x5F */
245 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
246 /* 0x60 - 0x6F */
247 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
248 /* 0x70 - 0x7F */
249 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
250 /* 0x80 - 0x8F */
251 X16(SrcImm),
252 /* 0x90 - 0x9F */
253 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
254 /* 0xA0 - 0xA7 */
255 ImplicitOps | Stack, ImplicitOps | Stack,
256 0, DstMem | SrcReg | ModRM | BitOp,
257 DstMem | SrcReg | Src2ImmByte | ModRM,
258 DstMem | SrcReg | Src2CL | ModRM, 0, 0,
259 /* 0xA8 - 0xAF */
260 ImplicitOps | Stack, ImplicitOps | Stack,
261 0, DstMem | SrcReg | ModRM | BitOp | Lock,
262 DstMem | SrcReg | Src2ImmByte | ModRM,
263 DstMem | SrcReg | Src2CL | ModRM,
264 ModRM, 0,
265 /* 0xB0 - 0xB7 */
266 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
267 0, DstMem | SrcReg | ModRM | BitOp | Lock,
268 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
269 DstReg | SrcMem16 | ModRM | Mov,
270 /* 0xB8 - 0xBF */
271 0, 0,
272 Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
273 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
274 DstReg | SrcMem16 | ModRM | Mov,
275 /* 0xC0 - 0xCF */
276 0, 0, 0, DstMem | SrcReg | ModRM | Mov,
277 0, 0, 0, Group | GroupDual | Group9,
278 0, 0, 0, 0, 0, 0, 0, 0,
279 /* 0xD0 - 0xDF */
280 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
281 /* 0xE0 - 0xEF */
282 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
283 /* 0xF0 - 0xFF */
284 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
285};
286
287static u32 group_table[] = {
288 [Group1*8] =
289 X7(Lock), 0,
290 [Group1A*8] =
291 DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
292 [Group3*8] =
293 DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
294 DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock,
295 X4(Undefined),
296 [Group4*8] =
297 ByteOp | DstMem | SrcNone | ModRM | Lock, ByteOp | DstMem | SrcNone | ModRM | Lock,
298 0, 0, 0, 0, 0, 0,
299 [Group5*8] =
300 DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock,
301 SrcMem | ModRM | Stack, 0,
302 SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps,
303 SrcMem | ModRM | Stack, 0,
304 [Group7*8] =
305 0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
306 SrcNone | ModRM | DstMem | Mov, 0,
307 SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
308 [Group8*8] =
309 0, 0, 0, 0,
310 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
311 DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
312 [Group9*8] =
313 0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
314};
315
316static u32 group2_table[] = {
317 [Group7*8] =
318 SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
319 SrcNone | ModRM | DstMem | Mov, 0,
320 SrcMem16 | ModRM | Mov | Priv, 0,
321 [Group9*8] =
322 0, 0, 0, 0, 0, 0, 0, 0,
323};
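/*
 * For Group opcodes the ModRM reg field (bits 3:5) selects one of eight
 * sub-instructions, so every group occupies eight consecutive slots and the
 * decoder indexes group_table[group * 8 + modrm_reg].  group2_table holds
 * the alternate GroupDual decode used when ModRM mod == 3, i.e. when the
 * operand is a register rather than memory.
 */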
324
325/* EFLAGS bit definitions. */
326#define EFLG_ID (1<<21)
327#define EFLG_VIP (1<<20)
328#define EFLG_VIF (1<<19)
329#define EFLG_AC (1<<18)
330#define EFLG_VM (1<<17)
331#define EFLG_RF (1<<16)
332#define EFLG_IOPL (3<<12)
333#define EFLG_NT (1<<14)
334#define EFLG_OF (1<<11)
335#define EFLG_DF (1<<10)
336#define EFLG_IF (1<<9)
337#define EFLG_TF (1<<8)
338#define EFLG_SF (1<<7)
339#define EFLG_ZF (1<<6)
340#define EFLG_AF (1<<4)
341#define EFLG_PF (1<<2)
342#define EFLG_CF (1<<0)
343
344#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
345#define EFLG_RESERVED_ONE_MASK 2
346
347/*
348 * Instruction emulation:
349 * Most instructions are emulated directly via a fragment of inline assembly
350 * code. This allows us to save/restore EFLAGS and thus very easily pick up
351 * any modified flags.
352 */
353
354#if defined(CONFIG_X86_64)
355#define _LO32 "k" /* force 32-bit operand */
356#define _STK "%%rsp" /* stack pointer */
357#elif defined(__i386__)
358#define _LO32 "" /* force 32-bit operand */
359#define _STK "%%esp" /* stack pointer */
360#endif
361
362/*
363 * These EFLAGS bits are restored from saved value during emulation, and
364 * any changes are written back to the saved value after emulation.
365 */
366#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
367
368/* Before executing instruction: restore necessary bits in EFLAGS. */
369#define _PRE_EFLAGS(_sav, _msk, _tmp) \
370 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
371 "movl %"_sav",%"_LO32 _tmp"; " \
372 "push %"_tmp"; " \
373 "push %"_tmp"; " \
374 "movl %"_msk",%"_LO32 _tmp"; " \
375 "andl %"_LO32 _tmp",("_STK"); " \
376 "pushf; " \
377 "notl %"_LO32 _tmp"; " \
378 "andl %"_LO32 _tmp",("_STK"); " \
379 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
380 "pop %"_tmp"; " \
381 "orl %"_LO32 _tmp",("_STK"); " \
382 "popf; " \
383 "pop %"_sav"; "
384
385/* After executing instruction: write-back necessary bits in EFLAGS. */
386#define _POST_EFLAGS(_sav, _msk, _tmp) \
387 /* _sav |= EFLAGS & _msk; */ \
388 "pushf; " \
389 "pop %"_tmp"; " \
390 "andl %"_msk",%"_LO32 _tmp"; " \
391 "orl %"_LO32 _tmp",%"_sav"; "
392
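/*
 * Together these two macros sandwich the emulated instruction: _PRE_EFLAGS
 * splices the guest's arithmetic flags (limited to EFLAGS_MASK) into the
 * host EFLAGS via pushf/popf, the instruction then executes natively, and
 * _POST_EFLAGS copies the resulting flag bits back into the saved value.
 * This is why most two-operand instructions below need no manual flag
 * computation at all.
 */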
393#ifdef CONFIG_X86_64
394#define ON64(x) x
395#else
396#define ON64(x)
397#endif
398
399#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
400 do { \
401 __asm__ __volatile__ ( \
402 _PRE_EFLAGS("0", "4", "2") \
403 _op _suffix " %"_x"3,%1; " \
404 _POST_EFLAGS("0", "4", "2") \
405 : "=m" (_eflags), "=m" ((_dst).val), \
406 "=&r" (_tmp) \
407 : _y ((_src).val), "i" (EFLAGS_MASK)); \
408 } while (0)
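/*
 * ____emulate_2op() thus runs the real instruction on the host against the
 * guest's operands; for an 8-bit add it emits, roughly,
 *
 *	addb %bl,<(_dst).val in memory>
 *
 * between the _PRE_EFLAGS and _POST_EFLAGS sequences.  _x selects the
 * sub-register width modifier for the source operand and _y its inline-asm
 * constraint; the wrappers below pick them per operand size.
 */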
409
410
411/* Raw emulation: instruction has two explicit operands. */
412#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
413 do { \
414 unsigned long _tmp; \
415 \
416 switch ((_dst).bytes) { \
417 case 2: \
418 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
419 break; \
420 case 4: \
421 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
422 break; \
423 case 8: \
424 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
425 break; \
426 } \
427 } while (0)
428
429#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
430 do { \
431 unsigned long _tmp; \
432 switch ((_dst).bytes) { \
433 case 1: \
434 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
435 break; \
436 default: \
437 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
438 _wx, _wy, _lx, _ly, _qx, _qy); \
439 break; \
440 } \
441 } while (0)
442
443/* Source operand is byte-sized and may be restricted to just %cl. */
444#define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
445 __emulate_2op(_op, _src, _dst, _eflags, \
446 "b", "c", "b", "c", "b", "c", "b", "c")
447
448/* Source operand is byte, word, long or quad sized. */
449#define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
450 __emulate_2op(_op, _src, _dst, _eflags, \
451 "b", "q", "w", "r", _LO32, "r", "", "r")
452
453/* Source operand is word, long or quad sized. */
454#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
455 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
456 "w", "r", _LO32, "r", "", "r")
457
458/* Instruction has three operands and one operand is stored in ECX register */
459#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
460 do { \
461 unsigned long _tmp; \
462 _type _clv = (_cl).val; \
463 _type _srcv = (_src).val; \
464 _type _dstv = (_dst).val; \
465 \
466 __asm__ __volatile__ ( \
467 _PRE_EFLAGS("0", "5", "2") \
468 _op _suffix " %4,%1 \n" \
469 _POST_EFLAGS("0", "5", "2") \
470 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
471 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
472 ); \
473 \
474 (_cl).val = (unsigned long) _clv; \
475 (_src).val = (unsigned long) _srcv; \
476 (_dst).val = (unsigned long) _dstv; \
477 } while (0)
478
479#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
480 do { \
481 switch ((_dst).bytes) { \
482 case 2: \
483 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
484 "w", unsigned short); \
485 break; \
486 case 4: \
487 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
488 "l", unsigned int); \
489 break; \
490 case 8: \
491 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
492 "q", unsigned long)); \
493 break; \
494 } \
495 } while (0)
496
497#define __emulate_1op(_op, _dst, _eflags, _suffix) \
498 do { \
499 unsigned long _tmp; \
500 \
501 __asm__ __volatile__ ( \
502 _PRE_EFLAGS("0", "3", "2") \
503 _op _suffix " %1; " \
504 _POST_EFLAGS("0", "3", "2") \
505 : "=m" (_eflags), "+m" ((_dst).val), \
506 "=&r" (_tmp) \
507 : "i" (EFLAGS_MASK)); \
508 } while (0)
509
510/* Instruction has only one explicit operand (no source operand). */
511#define emulate_1op(_op, _dst, _eflags) \
512 do { \
513 switch ((_dst).bytes) { \
514 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
515 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
516 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
517 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
518 } \
519 } while (0)
520
521/* Fetch next part of the instruction being emulated. */
522#define insn_fetch(_type, _size, _eip) \
523({ unsigned long _x; \
524 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
525 if (rc != X86EMUL_CONTINUE) \
526 goto done; \
527 (_eip) += (_size); \
528 (_type)_x; \
529})
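/*
 * insn_fetch() is a statement expression yielding the next _size bytes of
 * the instruction stream as _type.  Note the hidden contract with callers:
 * a local "rc" and a "done:" label must be in scope, because any fetch
 * failure jumps straight to the caller's exit path.
 */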
530
531#define insn_fetch_arr(_arr, _size, _eip) \
532({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
533 if (rc != X86EMUL_CONTINUE) \
534 goto done; \
535 (_eip) += (_size); \
536})
537
538static inline unsigned long ad_mask(struct decode_cache *c)
539{
540 return (1UL << (c->ad_bytes << 3)) - 1;
541}
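/*
 * ad_mask() turns the address size in bytes into a low-bit mask:
 * ad_bytes == 2 yields 0xffff and 4 yields 0xffffffff.  (A shift by the
 * full register width would be undefined, which is why callers
 * special-case the native-width mode before masking.)
 */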
542
543/* Access/update address held in a register, based on addressing mode. */
544static inline unsigned long
545address_mask(struct decode_cache *c, unsigned long reg)
546{
547 if (c->ad_bytes == sizeof(unsigned long))
548 return reg;
549 else
550 return reg & ad_mask(c);
551}
552
553static inline unsigned long
554register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
555{
556 return base + address_mask(c, reg);
557}
558
559static inline void
560register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
561{
562 if (c->ad_bytes == sizeof(unsigned long))
563 *reg += inc;
564 else
565 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
566}
567
568static inline void jmp_rel(struct decode_cache *c, int rel)
569{
570 register_address_increment(c, &c->eip, rel);
571}
572
573static void set_seg_override(struct decode_cache *c, int seg)
574{
575 c->has_seg_override = true;
576 c->seg_override = seg;
577}
578
579static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
580 struct x86_emulate_ops *ops, int seg)
581{
582 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
583 return 0;
584
585 return ops->get_cached_segment_base(seg, ctxt->vcpu);
586}
587
588static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
589 struct x86_emulate_ops *ops,
590 struct decode_cache *c)
591{
592 if (!c->has_seg_override)
593 return 0;
594
595 return seg_base(ctxt, ops, c->seg_override);
596}
597
598static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
599 struct x86_emulate_ops *ops)
600{
601 return seg_base(ctxt, ops, VCPU_SREG_ES);
602}
603
604static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
605 struct x86_emulate_ops *ops)
606{
607 return seg_base(ctxt, ops, VCPU_SREG_SS);
608}
609
610static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
611 u32 error, bool valid)
612{
613 ctxt->exception = vec;
614 ctxt->error_code = error;
615 ctxt->error_code_valid = valid;
616 ctxt->restart = false;
617}
618
619static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
620{
621 emulate_exception(ctxt, GP_VECTOR, err, true);
622}
623
624static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
625 int err)
626{
627 ctxt->cr2 = addr;
628 emulate_exception(ctxt, PF_VECTOR, err, true);
629}
630
631static void emulate_ud(struct x86_emulate_ctxt *ctxt)
632{
633 emulate_exception(ctxt, UD_VECTOR, 0, false);
634}
635
636static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
637{
638 emulate_exception(ctxt, TS_VECTOR, err, true);
639}
640
641static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
642 struct x86_emulate_ops *ops,
643 unsigned long eip, u8 *dest)
644{
645 struct fetch_cache *fc = &ctxt->decode.fetch;
646 int rc;
647 int size, cur_size;
648
649 if (eip == fc->end) {
650 cur_size = fc->end - fc->start;
651 size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
652 rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
653 size, ctxt->vcpu, NULL);
654 if (rc != X86EMUL_CONTINUE)
655 return rc;
656 fc->end += size;
657 }
658 *dest = fc->data[eip - fc->start];
659 return X86EMUL_CONTINUE;
660}
661
662static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
663 struct x86_emulate_ops *ops,
664 unsigned long eip, void *dest, unsigned size)
665{
666 int rc;
667
668 /* x86 instructions are limited to 15 bytes. */
669 if (eip + size - ctxt->eip > 15)
670 return X86EMUL_UNHANDLEABLE;
671 while (size--) {
672 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
673 if (rc != X86EMUL_CONTINUE)
674 return rc;
675 }
676 return X86EMUL_CONTINUE;
677}
678
679/*
680 * Given the 'reg' portion of a ModRM byte, and a register block, return a
681 * pointer into the block that addresses the relevant register.
682 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
683 */
684static void *decode_register(u8 modrm_reg, unsigned long *regs,
685 int highbyte_regs)
686{
687 void *p;
688
689 p = &regs[modrm_reg];
690 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
691 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
692 return p;
693}
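/*
 * Without a REX prefix, encodings 4-7 name the legacy high-byte registers
 * AH/CH/DH/BH, which live in bits 8-15 of RAX/RCX/RDX/RBX; hence the
 * "& 3" plus one-byte offset above.
 */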
694
695static int read_descriptor(struct x86_emulate_ctxt *ctxt,
696 struct x86_emulate_ops *ops,
697 void *ptr,
698 u16 *size, unsigned long *address, int op_bytes)
699{
700 int rc;
701
702 if (op_bytes == 2)
703 op_bytes = 3;
704 *address = 0;
705 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
706 ctxt->vcpu, NULL);
707 if (rc != X86EMUL_CONTINUE)
708 return rc;
709 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
710 ctxt->vcpu, NULL);
711 return rc;
712}
713
714static int test_cc(unsigned int condition, unsigned int flags)
715{
716 int rc = 0;
717
718 switch ((condition & 15) >> 1) {
719 case 0: /* o */
720 rc |= (flags & EFLG_OF);
721 break;
722 case 1: /* b/c/nae */
723 rc |= (flags & EFLG_CF);
724 break;
725 case 2: /* z/e */
726 rc |= (flags & EFLG_ZF);
727 break;
728 case 3: /* be/na */
729 rc |= (flags & (EFLG_CF|EFLG_ZF));
730 break;
731 case 4: /* s */
732 rc |= (flags & EFLG_SF);
733 break;
734 case 5: /* p/pe */
735 rc |= (flags & EFLG_PF);
736 break;
737 case 7: /* le/ng */
738 rc |= (flags & EFLG_ZF);
739 /* fall through */
740 case 6: /* l/nge */
741 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
742 break;
743 }
744
745 /* Odd condition identifiers (lsb == 1) have inverted sense. */
746 return (!!rc ^ (condition & 1));
747}
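/*
 * Condition codes pair up: bits 3:1 of the condition select the predicate
 * and bit 0 inverts it.  For example 0x74 (jz) tests ZF directly, while
 * 0x75 (jnz) takes the same case and is inverted by the final xor.
 */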
748
749static void decode_register_operand(struct operand *op,
750 struct decode_cache *c,
751 int inhibit_bytereg)
752{
753 unsigned reg = c->modrm_reg;
754 int highbyte_regs = c->rex_prefix == 0;
755
756 if (!(c->d & ModRM))
757 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
758 op->type = OP_REG;
759 if ((c->d & ByteOp) && !inhibit_bytereg) {
760 op->ptr = decode_register(reg, c->regs, highbyte_regs);
761 op->val = *(u8 *)op->ptr;
762 op->bytes = 1;
763 } else {
764 op->ptr = decode_register(reg, c->regs, 0);
765 op->bytes = c->op_bytes;
766 switch (op->bytes) {
767 case 2:
768 op->val = *(u16 *)op->ptr;
769 break;
770 case 4:
771 op->val = *(u32 *)op->ptr;
772 break;
773 case 8:
774 op->val = *(u64 *) op->ptr;
775 break;
776 }
777 }
778 op->orig_val = op->val;
779}
780
781static int decode_modrm(struct x86_emulate_ctxt *ctxt,
782 struct x86_emulate_ops *ops)
783{
784 struct decode_cache *c = &ctxt->decode;
785 u8 sib;
786 int index_reg = 0, base_reg = 0, scale;
787 int rc = X86EMUL_CONTINUE;
788
789 if (c->rex_prefix) {
790 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
791 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
792 c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
793 }
794
795 c->modrm = insn_fetch(u8, 1, c->eip);
796 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
797 c->modrm_reg |= (c->modrm & 0x38) >> 3;
798 c->modrm_rm |= (c->modrm & 0x07);
799 c->modrm_ea = 0;
800 c->use_modrm_ea = 1;
801
802 if (c->modrm_mod == 3) {
803 c->modrm_ptr = decode_register(c->modrm_rm,
804 c->regs, c->d & ByteOp);
805 c->modrm_val = *(unsigned long *)c->modrm_ptr;
806 return rc;
807 }
808
809 if (c->ad_bytes == 2) {
810 unsigned bx = c->regs[VCPU_REGS_RBX];
811 unsigned bp = c->regs[VCPU_REGS_RBP];
812 unsigned si = c->regs[VCPU_REGS_RSI];
813 unsigned di = c->regs[VCPU_REGS_RDI];
814
815 /* 16-bit ModR/M decode. */
816 switch (c->modrm_mod) {
817 case 0:
818 if (c->modrm_rm == 6)
819 c->modrm_ea += insn_fetch(u16, 2, c->eip);
820 break;
821 case 1:
822 c->modrm_ea += insn_fetch(s8, 1, c->eip);
823 break;
824 case 2:
825 c->modrm_ea += insn_fetch(u16, 2, c->eip);
826 break;
827 }
828 switch (c->modrm_rm) {
829 case 0:
830 c->modrm_ea += bx + si;
831 break;
832 case 1:
833 c->modrm_ea += bx + di;
834 break;
835 case 2:
836 c->modrm_ea += bp + si;
837 break;
838 case 3:
839 c->modrm_ea += bp + di;
840 break;
841 case 4:
842 c->modrm_ea += si;
843 break;
844 case 5:
845 c->modrm_ea += di;
846 break;
847 case 6:
848 if (c->modrm_mod != 0)
849 c->modrm_ea += bp;
850 break;
851 case 7:
852 c->modrm_ea += bx;
853 break;
854 }
855 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
856 (c->modrm_rm == 6 && c->modrm_mod != 0))
857 if (!c->has_seg_override)
858 set_seg_override(c, VCPU_SREG_SS);
859 c->modrm_ea = (u16)c->modrm_ea;
860 } else {
861 /* 32/64-bit ModR/M decode. */
862 if ((c->modrm_rm & 7) == 4) {
863 sib = insn_fetch(u8, 1, c->eip);
864 index_reg |= (sib >> 3) & 7;
865 base_reg |= sib & 7;
866 scale = sib >> 6;
867
868 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
869 c->modrm_ea += insn_fetch(s32, 4, c->eip);
870 else
871 c->modrm_ea += c->regs[base_reg];
872 if (index_reg != 4)
873 c->modrm_ea += c->regs[index_reg] << scale;
874 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
875 if (ctxt->mode == X86EMUL_MODE_PROT64)
876 c->rip_relative = 1;
877 } else
878 c->modrm_ea += c->regs[c->modrm_rm];
879 switch (c->modrm_mod) {
880 case 0:
881 if (c->modrm_rm == 5)
882 c->modrm_ea += insn_fetch(s32, 4, c->eip);
883 break;
884 case 1:
885 c->modrm_ea += insn_fetch(s8, 1, c->eip);
886 break;
887 case 2:
888 c->modrm_ea += insn_fetch(s32, 4, c->eip);
889 break;
890 }
891 }
892done:
893 return rc;
894}
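/*
 * Summary of the decode above: 16-bit addressing maps the rm field onto
 * one of eight fixed base+index pairs (bx+si, bx+di, bp+si, ...), whereas
 * 32/64-bit addressing treats rm as a register number, with rm == 4
 * escaping to a SIB byte and mod == 0, rm == 5 meaning disp32 (or
 * RIP-relative addressing in 64-bit mode).
 */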
895
896static int decode_abs(struct x86_emulate_ctxt *ctxt,
897 struct x86_emulate_ops *ops)
898{
899 struct decode_cache *c = &ctxt->decode;
900 int rc = X86EMUL_CONTINUE;
901
902 switch (c->ad_bytes) {
903 case 2:
904 c->modrm_ea = insn_fetch(u16, 2, c->eip);
905 break;
906 case 4:
907 c->modrm_ea = insn_fetch(u32, 4, c->eip);
908 break;
909 case 8:
910 c->modrm_ea = insn_fetch(u64, 8, c->eip);
911 break;
912 }
913done:
914 return rc;
915}
916
917int
918x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
919{
920 struct decode_cache *c = &ctxt->decode;
921 int rc = X86EMUL_CONTINUE;
922 int mode = ctxt->mode;
923 int def_op_bytes, def_ad_bytes, group, dual;
924
925
926 /* we cannot decode a new insn before completing the previous rep insn */
927 WARN_ON(ctxt->restart);
928
929 c->eip = ctxt->eip;
930 c->fetch.start = c->fetch.end = c->eip;
931 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
932
933 switch (mode) {
934 case X86EMUL_MODE_REAL:
935 case X86EMUL_MODE_VM86:
936 case X86EMUL_MODE_PROT16:
937 def_op_bytes = def_ad_bytes = 2;
938 break;
939 case X86EMUL_MODE_PROT32:
940 def_op_bytes = def_ad_bytes = 4;
941 break;
942#ifdef CONFIG_X86_64
943 case X86EMUL_MODE_PROT64:
944 def_op_bytes = 4;
945 def_ad_bytes = 8;
946 break;
947#endif
948 default:
949 return -1;
950 }
951
952 c->op_bytes = def_op_bytes;
953 c->ad_bytes = def_ad_bytes;
954
955 /* Legacy prefixes. */
956 for (;;) {
957 switch (c->b = insn_fetch(u8, 1, c->eip)) {
958 case 0x66: /* operand-size override */
959 /* switch between 2/4 bytes */
960 c->op_bytes = def_op_bytes ^ 6;
961 break;
962 case 0x67: /* address-size override */
963 if (mode == X86EMUL_MODE_PROT64)
964 /* switch between 4/8 bytes */
965 c->ad_bytes = def_ad_bytes ^ 12;
966 else
967 /* switch between 2/4 bytes */
968 c->ad_bytes = def_ad_bytes ^ 6;
969 break;
970 case 0x26: /* ES override */
971 case 0x2e: /* CS override */
972 case 0x36: /* SS override */
973 case 0x3e: /* DS override */
974 set_seg_override(c, (c->b >> 3) & 3);
975 break;
976 case 0x64: /* FS override */
977 case 0x65: /* GS override */
978 set_seg_override(c, c->b & 7);
979 break;
980 case 0x40 ... 0x4f: /* REX */
981 if (mode != X86EMUL_MODE_PROT64)
982 goto done_prefixes;
983 c->rex_prefix = c->b;
984 continue;
985 case 0xf0: /* LOCK */
986 c->lock_prefix = 1;
987 break;
988 case 0xf2: /* REPNE/REPNZ */
989 c->rep_prefix = REPNE_PREFIX;
990 break;
991 case 0xf3: /* REP/REPE/REPZ */
992 c->rep_prefix = REPE_PREFIX;
993 break;
994 default:
995 goto done_prefixes;
996 }
997
998 /* Any legacy prefix after a REX prefix nullifies its effect. */
999
1000 c->rex_prefix = 0;
1001 }
1002
1003done_prefixes:
1004
1005 /* REX prefix. */
1006 if (c->rex_prefix)
1007 if (c->rex_prefix & 8)
1008 c->op_bytes = 8; /* REX.W */
1009
1010 /* Opcode byte(s). */
1011 c->d = opcode_table[c->b];
1012 if (c->d == 0) {
1013 /* Two-byte opcode? */
1014 if (c->b == 0x0f) {
1015 c->twobyte = 1;
1016 c->b = insn_fetch(u8, 1, c->eip);
1017 c->d = twobyte_table[c->b];
1018 }
1019 }
1020
1021 if (c->d & Group) {
1022 group = c->d & GroupMask;
1023 dual = c->d & GroupDual;
1024 c->modrm = insn_fetch(u8, 1, c->eip);
1025 --c->eip;
1026
1027 group = (group << 3) + ((c->modrm >> 3) & 7);
1028 c->d &= ~(Group | GroupDual | GroupMask);
1029 if (dual && (c->modrm >> 6) == 3)
1030 c->d |= group2_table[group];
1031 else
1032 c->d |= group_table[group];
1033 }
1034
1035 /* Unrecognised? */
1036 if (c->d == 0 || (c->d & Undefined)) {
1037 DPRINTF("Cannot emulate %02x\n", c->b);
1038 return -1;
1039 }
1040
1041 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
1042 c->op_bytes = 8;
1043
1044 /* ModRM and SIB bytes. */
1045 if (c->d & ModRM)
1046 rc = decode_modrm(ctxt, ops);
1047 else if (c->d & MemAbs)
1048 rc = decode_abs(ctxt, ops);
1049 if (rc != X86EMUL_CONTINUE)
1050 goto done;
1051
1052 if (!c->has_seg_override)
1053 set_seg_override(c, VCPU_SREG_DS);
1054
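	/*
	 * lea (one-byte opcode 0x8d) only uses the effective address as a
	 * plain number, so it is the one ModRM consumer that must not have
	 * a segment base added in; hence the double-negative guard below.
	 */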
1055 if (!(!c->twobyte && c->b == 0x8d))
1056 c->modrm_ea += seg_override_base(ctxt, ops, c);
1057
1058 if (c->ad_bytes != 8)
1059 c->modrm_ea = (u32)c->modrm_ea;
1060
1061 if (c->rip_relative)
1062 c->modrm_ea += c->eip;
1063
1064 /*
1065 * Decode and fetch the source operand: register, memory
1066 * or immediate.
1067 */
1068 switch (c->d & SrcMask) {
1069 case SrcNone:
1070 break;
1071 case SrcReg:
1072 decode_register_operand(&c->src, c, 0);
1073 break;
1074 case SrcMem16:
1075 c->src.bytes = 2;
1076 goto srcmem_common;
1077 case SrcMem32:
1078 c->src.bytes = 4;
1079 goto srcmem_common;
1080 case SrcMem:
1081 c->src.bytes = (c->d & ByteOp) ? 1 :
1082 c->op_bytes;
1083 /* Don't fetch the address for invlpg: it could be unmapped. */
1084 if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
1085 break;
1086 srcmem_common:
1087 /*
1088 * For instructions with a ModR/M byte, switch to register
1089 * access if Mod = 3.
1090 */
1091 if ((c->d & ModRM) && c->modrm_mod == 3) {
1092 c->src.type = OP_REG;
1093 c->src.val = c->modrm_val;
1094 c->src.ptr = c->modrm_ptr;
1095 break;
1096 }
1097 c->src.type = OP_MEM;
1098 c->src.ptr = (unsigned long *)c->modrm_ea;
1099 c->src.val = 0;
1100 break;
1101 case SrcImm:
1102 case SrcImmU:
1103 c->src.type = OP_IMM;
1104 c->src.ptr = (unsigned long *)c->eip;
1105 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1106 if (c->src.bytes == 8)
1107 c->src.bytes = 4;
1108 /* NB. Immediates are sign-extended as necessary. */
1109 switch (c->src.bytes) {
1110 case 1:
1111 c->src.val = insn_fetch(s8, 1, c->eip);
1112 break;
1113 case 2:
1114 c->src.val = insn_fetch(s16, 2, c->eip);
1115 break;
1116 case 4:
1117 c->src.val = insn_fetch(s32, 4, c->eip);
1118 break;
1119 }
1120 if ((c->d & SrcMask) == SrcImmU) {
1121 switch (c->src.bytes) {
1122 case 1:
1123 c->src.val &= 0xff;
1124 break;
1125 case 2:
1126 c->src.val &= 0xffff;
1127 break;
1128 case 4:
1129 c->src.val &= 0xffffffff;
1130 break;
1131 }
1132 }
1133 break;
1134 case SrcImmByte:
1135 case SrcImmUByte:
1136 c->src.type = OP_IMM;
1137 c->src.ptr = (unsigned long *)c->eip;
1138 c->src.bytes = 1;
1139 if ((c->d & SrcMask) == SrcImmByte)
1140 c->src.val = insn_fetch(s8, 1, c->eip);
1141 else
1142 c->src.val = insn_fetch(u8, 1, c->eip);
1143 break;
1144 case SrcAcc:
1145 c->src.type = OP_REG;
1146 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1147 c->src.ptr = &c->regs[VCPU_REGS_RAX];
1148 switch (c->src.bytes) {
1149 case 1:
1150 c->src.val = *(u8 *)c->src.ptr;
1151 break;
1152 case 2:
1153 c->src.val = *(u16 *)c->src.ptr;
1154 break;
1155 case 4:
1156 c->src.val = *(u32 *)c->src.ptr;
1157 break;
1158 case 8:
1159 c->src.val = *(u64 *)c->src.ptr;
1160 break;
1161 }
1162 break;
1163 case SrcOne:
1164 c->src.bytes = 1;
1165 c->src.val = 1;
1166 break;
1167 case SrcSI:
1168 c->src.type = OP_MEM;
1169 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1170 c->src.ptr = (unsigned long *)
1171 register_address(c, seg_override_base(ctxt, ops, c),
1172 c->regs[VCPU_REGS_RSI]);
1173 c->src.val = 0;
1174 break;
1175 case SrcImmFAddr:
1176 c->src.type = OP_IMM;
1177 c->src.ptr = (unsigned long *)c->eip;
1178 c->src.bytes = c->op_bytes + 2;
1179 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
1180 break;
1181 case SrcMemFAddr:
1182 c->src.type = OP_MEM;
1183 c->src.ptr = (unsigned long *)c->modrm_ea;
1184 c->src.bytes = c->op_bytes + 2;
1185 break;
1186 }
1187
1188 /*
1189 * Decode and fetch the second source operand: register, memory
1190 * or immediate.
1191 */
1192 switch (c->d & Src2Mask) {
1193 case Src2None:
1194 break;
1195 case Src2CL:
1196 c->src2.bytes = 1;
1197 c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL */
1198 break;
1199 case Src2ImmByte:
1200 c->src2.type = OP_IMM;
1201 c->src2.ptr = (unsigned long *)c->eip;
1202 c->src2.bytes = 1;
1203 c->src2.val = insn_fetch(u8, 1, c->eip);
1204 break;
1205 case Src2One:
1206 c->src2.bytes = 1;
1207 c->src2.val = 1;
1208 break;
1209 }
1210
1211 /* Decode and fetch the destination operand: register or memory. */
1212 switch (c->d & DstMask) {
1213 case ImplicitOps:
1214 /* Special instructions do their own operand decoding. */
1215 return 0;
1216 case DstReg:
1217 decode_register_operand(&c->dst, c,
1218 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
1219 break;
1220 case DstMem:
1221 case DstMem64:
1222 if ((c->d & ModRM) && c->modrm_mod == 3) {
1223 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1224 c->dst.type = OP_REG;
1225 c->dst.val = c->dst.orig_val = c->modrm_val;
1226 c->dst.ptr = c->modrm_ptr;
1227 break;
1228 }
1229 c->dst.type = OP_MEM;
1230 c->dst.ptr = (unsigned long *)c->modrm_ea;
1231 if ((c->d & DstMask) == DstMem64)
1232 c->dst.bytes = 8;
1233 else
1234 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1235 c->dst.val = 0;
1236 if (c->d & BitOp) {
1237 unsigned long mask = ~(c->dst.bytes * 8 - 1);
1238
1239 c->dst.ptr = (void *)c->dst.ptr +
1240 (c->src.val & mask) / 8;
1241 }
1242 break;
1243 case DstAcc:
1244 c->dst.type = OP_REG;
1245 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1246 c->dst.ptr = &c->regs[VCPU_REGS_RAX];
1247 switch (c->dst.bytes) {
1248 case 1:
1249 c->dst.val = *(u8 *)c->dst.ptr;
1250 break;
1251 case 2:
1252 c->dst.val = *(u16 *)c->dst.ptr;
1253 break;
1254 case 4:
1255 c->dst.val = *(u32 *)c->dst.ptr;
1256 break;
1257 case 8:
1258 c->dst.val = *(u64 *)c->dst.ptr;
1259 break;
1260 }
1261 c->dst.orig_val = c->dst.val;
1262 break;
1263 case DstDI:
1264 c->dst.type = OP_MEM;
1265 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1266 c->dst.ptr = (unsigned long *)
1267 register_address(c, es_base(ctxt, ops),
1268 c->regs[VCPU_REGS_RDI]);
1269 c->dst.val = 0;
1270 break;
1271 }
1272
1273done:
1274 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
1275}
1276
1277static int read_emulated(struct x86_emulate_ctxt *ctxt,
1278 struct x86_emulate_ops *ops,
1279 unsigned long addr, void *dest, unsigned size)
1280{
1281 int rc;
1282 struct read_cache *mc = &ctxt->decode.mem_read;
1283 u32 err;
1284
1285 while (size) {
1286 int n = min(size, 8u);
1287 size -= n;
1288 if (mc->pos < mc->end)
1289 goto read_cached;
1290
1291 rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
1292 ctxt->vcpu);
1293 if (rc == X86EMUL_PROPAGATE_FAULT)
1294 emulate_pf(ctxt, addr, err);
1295 if (rc != X86EMUL_CONTINUE)
1296 return rc;
1297 mc->end += n;
1298
1299 read_cached:
1300 memcpy(dest, mc->data + mc->pos, n);
1301 mc->pos += n;
1302 dest += n;
1303 addr += n;
1304 }
1305 return X86EMUL_CONTINUE;
1306}
1307
1308static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1309 struct x86_emulate_ops *ops,
1310 unsigned int size, unsigned short port,
1311 void *dest)
1312{
1313 struct read_cache *rc = &ctxt->decode.io_read;
1314
1315 if (rc->pos == rc->end) { /* refill pio read ahead */
1316 struct decode_cache *c = &ctxt->decode;
1317 unsigned int in_page, n;
1318 unsigned int count = c->rep_prefix ?
1319 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
1320 in_page = (ctxt->eflags & EFLG_DF) ?
1321 offset_in_page(c->regs[VCPU_REGS_RDI]) :
1322 PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
1323 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1324 count);
1325 if (n == 0)
1326 n = 1;
1327 rc->pos = rc->end = 0;
1328 if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
1329 return 0;
1330 rc->end = n * size;
1331 }
1332
1333 memcpy(dest, rc->data + rc->pos, size);
1334 rc->pos += size;
1335 return 1;
1336}
1337
1338static u32 desc_limit_scaled(struct desc_struct *desc)
1339{
1340 u32 limit = get_desc_limit(desc);
1341
1342 return desc->g ? (limit << 12) | 0xfff : limit;
1343}
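/*
 * With the granularity bit set, the descriptor limit is counted in 4 KiB
 * pages, so a stored limit of 0xfffff scales to the full 4 GiB - 1.
 */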
1344
1345static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1346 struct x86_emulate_ops *ops,
1347 u16 selector, struct desc_ptr *dt)
1348{
1349 if (selector & 1 << 2) {
1350 struct desc_struct desc;
1351 memset(dt, 0, sizeof *dt);
1352 if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
1353 return;
1354
1355 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1356 dt->address = get_desc_base(&desc);
1357 } else
1358 ops->get_gdt(dt, ctxt->vcpu);
1359}
1360
1361/* allowed just for 8-byte segment descriptors */
1362static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1363 struct x86_emulate_ops *ops,
1364 u16 selector, struct desc_struct *desc)
1365{
1366 struct desc_ptr dt;
1367 u16 index = selector >> 3;
1368 int ret;
1369 u32 err;
1370 ulong addr;
1371
1372 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
1373
1374 if (dt.size < index * 8 + 7) {
1375 emulate_gp(ctxt, selector & 0xfffc);
1376 return X86EMUL_PROPAGATE_FAULT;
1377 }
1378 addr = dt.address + index * 8;
1379 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
1380 if (ret == X86EMUL_PROPAGATE_FAULT)
1381 emulate_pf(ctxt, addr, err);
1382
1383 return ret;
1384}
1385
1386/* allowed just for 8-byte segment descriptors */
1387static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1388 struct x86_emulate_ops *ops,
1389 u16 selector, struct desc_struct *desc)
1390{
1391 struct desc_ptr dt;
1392 u16 index = selector >> 3;
1393 u32 err;
1394 ulong addr;
1395 int ret;
1396
1397 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
1398
1399 if (dt.size < index * 8 + 7) {
1400 emulate_gp(ctxt, selector & 0xfffc);
1401 return X86EMUL_PROPAGATE_FAULT;
1402 }
1403
1404 addr = dt.address + index * 8;
1405 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
1406 if (ret == X86EMUL_PROPAGATE_FAULT)
1407 emulate_pf(ctxt, addr, err);
1408
1409 return ret;
1410}
1411
1412static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1413 struct x86_emulate_ops *ops,
1414 u16 selector, int seg)
1415{
1416 struct desc_struct seg_desc;
1417 u8 dpl, rpl, cpl;
1418 unsigned err_vec = GP_VECTOR;
1419 u32 err_code = 0;
1420 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1421 int ret;
1422
1423 memset(&seg_desc, 0, sizeof seg_desc);
1424
1425 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1426 || ctxt->mode == X86EMUL_MODE_REAL) {
1427 /* set real mode segment descriptor */
1428 set_desc_base(&seg_desc, selector << 4);
1429 set_desc_limit(&seg_desc, 0xffff);
1430 seg_desc.type = 3;
1431 seg_desc.p = 1;
1432 seg_desc.s = 1;
1433 goto load;
1434 }
1435
1436 /* NULL selector is not valid for TR, CS and SS */
1437 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1438 && null_selector)
1439 goto exception;
1440
1441 /* TR should be in GDT only */
1442 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1443 goto exception;
1444
1445 if (null_selector) /* for NULL selector skip all following checks */
1446 goto load;
1447
1448 ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
1449 if (ret != X86EMUL_CONTINUE)
1450 return ret;
1451
1452 err_code = selector & 0xfffc;
1453 err_vec = GP_VECTOR;
1454
1455 /* can't load a system descriptor into a segment register */
1456 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1457 goto exception;
1458
1459 if (!seg_desc.p) {
1460 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1461 goto exception;
1462 }
1463
1464 rpl = selector & 3;
1465 dpl = seg_desc.dpl;
1466 cpl = ops->cpl(ctxt->vcpu);
1467
1468 switch (seg) {
1469 case VCPU_SREG_SS:
1470 /*
1471 * segment is not a writable data segment, or the segment
1472 * selector's RPL != CPL, or the descriptor's DPL != CPL
1473 */
1474 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1475 goto exception;
1476 break;
1477 case VCPU_SREG_CS:
1478 if (!(seg_desc.type & 8))
1479 goto exception;
1480
1481 if (seg_desc.type & 4) {
1482 /* conforming */
1483 if (dpl > cpl)
1484 goto exception;
1485 } else {
1486 /* nonconforming */
1487 if (rpl > cpl || dpl != cpl)
1488 goto exception;
1489 }
1490 /* CS(RPL) <- CPL */
1491 selector = (selector & 0xfffc) | cpl;
1492 break;
1493 case VCPU_SREG_TR:
1494 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1495 goto exception;
1496 break;
1497 case VCPU_SREG_LDTR:
1498 if (seg_desc.s || seg_desc.type != 2)
1499 goto exception;
1500 break;
1501 default: /* DS, ES, FS, or GS */
1502 /*
1503 * segment is not a data or readable code segment or
1504 * ((segment is a data or nonconforming code segment)
1505 * and (both RPL and CPL > DPL))
1506 */
1507 if ((seg_desc.type & 0xa) == 0x8 ||
1508 (((seg_desc.type & 0xc) != 0xc) &&
1509 (rpl > dpl && cpl > dpl)))
1510 goto exception;
1511 break;
1512 }
1513
1514 if (seg_desc.s) {
1515 /* mark segment as accessed */
1516 seg_desc.type |= 1;
1517 ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
1518 if (ret != X86EMUL_CONTINUE)
1519 return ret;
1520 }
1521load:
1522 ops->set_segment_selector(selector, seg, ctxt->vcpu);
1523 ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
1524 return X86EMUL_CONTINUE;
1525exception:
1526 emulate_exception(ctxt, err_vec, err_code, true);
1527 return X86EMUL_PROPAGATE_FAULT;
1528}
1529
1530static inline int writeback(struct x86_emulate_ctxt *ctxt,
1531 struct x86_emulate_ops *ops)
1532{
1533 int rc;
1534 struct decode_cache *c = &ctxt->decode;
1535 u32 err;
1536
1537 switch (c->dst.type) {
1538 case OP_REG:
1539 /* The 4-byte case *is* correct:
1540 * in 64-bit mode we zero-extend.
1541 */
1542 switch (c->dst.bytes) {
1543 case 1:
1544 *(u8 *)c->dst.ptr = (u8)c->dst.val;
1545 break;
1546 case 2:
1547 *(u16 *)c->dst.ptr = (u16)c->dst.val;
1548 break;
1549 case 4:
1550 *c->dst.ptr = (u32)c->dst.val;
1551 break; /* 64b: zero-ext */
1552 case 8:
1553 *c->dst.ptr = c->dst.val;
1554 break;
1555 }
1556 break;
1557 case OP_MEM:
1558 if (c->lock_prefix)
1559 rc = ops->cmpxchg_emulated(
1560 (unsigned long)c->dst.ptr,
1561 &c->dst.orig_val,
1562 &c->dst.val,
1563 c->dst.bytes,
1564 &err,
1565 ctxt->vcpu);
1566 else
1567 rc = ops->write_emulated(
1568 (unsigned long)c->dst.ptr,
1569 &c->dst.val,
1570 c->dst.bytes,
1571 &err,
1572 ctxt->vcpu);
1573 if (rc == X86EMUL_PROPAGATE_FAULT)
1574 emulate_pf(ctxt,
1575 (unsigned long)c->dst.ptr, err);
1576 if (rc != X86EMUL_CONTINUE)
1577 return rc;
1578 break;
1579 case OP_NONE:
1580 /* no writeback */
1581 break;
1582 default:
1583 break;
1584 }
1585 return X86EMUL_CONTINUE;
1586}
1587
1588static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
1589 struct x86_emulate_ops *ops)
1590{
1591 struct decode_cache *c = &ctxt->decode;
1592
1593 c->dst.type = OP_MEM;
1594 c->dst.bytes = c->op_bytes;
1595 c->dst.val = c->src.val;
1596 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1597 c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
1598 c->regs[VCPU_REGS_RSP]);
1599}
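/*
 * Note that emulate_push() only stages the value and the new stack slot in
 * c->dst; the store itself happens when the caller's writeback() runs,
 * which is why emulate_pusha() below calls writeback() once per register.
 */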
1600
1601static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1602 struct x86_emulate_ops *ops,
1603 void *dest, int len)
1604{
1605 struct decode_cache *c = &ctxt->decode;
1606 int rc;
1607
1608 rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
1609 c->regs[VCPU_REGS_RSP]),
1610 dest, len);
1611 if (rc != X86EMUL_CONTINUE)
1612 return rc;
1613
1614 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1615 return rc;
1616}
1617
1618static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1619 struct x86_emulate_ops *ops,
1620 void *dest, int len)
1621{
1622 int rc;
1623 unsigned long val, change_mask;
1624 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1625 int cpl = ops->cpl(ctxt->vcpu);
1626
1627 rc = emulate_pop(ctxt, ops, &val, len);
1628 if (rc != X86EMUL_CONTINUE)
1629 return rc;
1630
1631 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1632 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1633
1634 switch (ctxt->mode) {
1635 case X86EMUL_MODE_PROT64:
1636 case X86EMUL_MODE_PROT32:
1637 case X86EMUL_MODE_PROT16:
1638 if (cpl == 0)
1639 change_mask |= EFLG_IOPL;
1640 if (cpl <= iopl)
1641 change_mask |= EFLG_IF;
1642 break;
1643 case X86EMUL_MODE_VM86:
1644 if (iopl < 3) {
1645 emulate_gp(ctxt, 0);
1646 return X86EMUL_PROPAGATE_FAULT;
1647 }
1648 change_mask |= EFLG_IF;
1649 break;
1650 default: /* real mode */
1651 change_mask |= (EFLG_IOPL | EFLG_IF);
1652 break;
1653 }
1654
1655 *(unsigned long *)dest =
1656 (ctxt->eflags & ~change_mask) | (val & change_mask);
1657
1658 return rc;
1659}
1660
1661static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
1662 struct x86_emulate_ops *ops, int seg)
1663{
1664 struct decode_cache *c = &ctxt->decode;
1665
1666 c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
1667
1668 emulate_push(ctxt, ops);
1669}
1670
1671static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1672 struct x86_emulate_ops *ops, int seg)
1673{
1674 struct decode_cache *c = &ctxt->decode;
1675 unsigned long selector;
1676 int rc;
1677
1678 rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
1679 if (rc != X86EMUL_CONTINUE)
1680 return rc;
1681
1682 rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
1683 return rc;
1684}
1685
1686static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
1687 struct x86_emulate_ops *ops)
1688{
1689 struct decode_cache *c = &ctxt->decode;
1690 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1691 int rc = X86EMUL_CONTINUE;
1692 int reg = VCPU_REGS_RAX;
1693
1694 while (reg <= VCPU_REGS_RDI) {
1695 c->src.val = (reg == VCPU_REGS_RSP) ?
1696 old_esp : c->regs[reg];
1697
1698 emulate_push(ctxt, ops);
1699
1700 rc = writeback(ctxt, ops);
1701 if (rc != X86EMUL_CONTINUE)
1702 return rc;
1703
1704 ++reg;
1705 }
1706
1707 /* Disable writeback. */
1708 c->dst.type = OP_NONE;
1709
1710 return rc;
1711}
1712
1713static int emulate_popa(struct x86_emulate_ctxt *ctxt,
1714 struct x86_emulate_ops *ops)
1715{
1716 struct decode_cache *c = &ctxt->decode;
1717 int rc = X86EMUL_CONTINUE;
1718 int reg = VCPU_REGS_RDI;
1719
1720 while (reg >= VCPU_REGS_RAX) {
1721 if (reg == VCPU_REGS_RSP) {
1722 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1723 c->op_bytes);
1724 --reg;
1725 }
1726
1727 rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
1728 if (rc != X86EMUL_CONTINUE)
1729 break;
1730 --reg;
1731 }
1732 return rc;
1733}
1734
1735static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
1736 struct x86_emulate_ops *ops)
1737{
1738 struct decode_cache *c = &ctxt->decode;
1739 int rc = X86EMUL_CONTINUE;
1740 unsigned long temp_eip = 0;
1741 unsigned long temp_eflags = 0;
1742 unsigned long cs = 0;
1743 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1744 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1745 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1746 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1747
1748 /* TODO: Add stack limit check */
1749
1750 rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
1751
1752 if (rc != X86EMUL_CONTINUE)
1753 return rc;
1754
1755 if (temp_eip & ~0xffff) {
1756 emulate_gp(ctxt, 0);
1757 return X86EMUL_PROPAGATE_FAULT;
1758 }
1759
1760 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1761
1762 if (rc != X86EMUL_CONTINUE)
1763 return rc;
1764
1765 rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
1766
1767 if (rc != X86EMUL_CONTINUE)
1768 return rc;
1769
1770 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1771
1772 if (rc != X86EMUL_CONTINUE)
1773 return rc;
1774
1775 c->eip = temp_eip;
1776
1777
1778 if (c->op_bytes == 4)
1779 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1780 else if (c->op_bytes == 2) {
1781 ctxt->eflags &= ~0xffff;
1782 ctxt->eflags |= temp_eflags;
1783 }
1784
1785 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1786 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1787
1788 return rc;
1789}
1790
1791static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
1792 struct x86_emulate_ops *ops)
1793{
1794 switch (ctxt->mode) {
1795 case X86EMUL_MODE_REAL:
1796 return emulate_iret_real(ctxt, ops);
1797 case X86EMUL_MODE_VM86:
1798 case X86EMUL_MODE_PROT16:
1799 case X86EMUL_MODE_PROT32:
1800 case X86EMUL_MODE_PROT64:
1801 default:
1802 /* iret from protected mode unimplemented yet */
1803 return X86EMUL_UNHANDLEABLE;
1804 }
1805}
1806
1807static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1808 struct x86_emulate_ops *ops)
1809{
1810 struct decode_cache *c = &ctxt->decode;
1811
1812 return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1813}
1814
1815static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
1816{
1817 struct decode_cache *c = &ctxt->decode;
1818 switch (c->modrm_reg) {
1819 case 0: /* rol */
1820 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1821 break;
1822 case 1: /* ror */
1823 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1824 break;
1825 case 2: /* rcl */
1826 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1827 break;
1828 case 3: /* rcr */
1829 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1830 break;
1831 case 4: /* sal/shl */
1832 case 6: /* sal/shl */
1833 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1834 break;
1835 case 5: /* shr */
1836 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1837 break;
1838 case 7: /* sar */
1839 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1840 break;
1841 }
1842}
1843
1844static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1845 struct x86_emulate_ops *ops)
1846{
1847 struct decode_cache *c = &ctxt->decode;
1848
1849 switch (c->modrm_reg) {
1850 case 0 ... 1: /* test */
1851 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1852 break;
1853 case 2: /* not */
1854 c->dst.val = ~c->dst.val;
1855 break;
1856 case 3: /* neg */
1857 emulate_1op("neg", c->dst, ctxt->eflags);
1858 break;
1859 default:
1860 return 0;
1861 }
1862 return 1;
1863}
1864
1865static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1866 struct x86_emulate_ops *ops)
1867{
1868 struct decode_cache *c = &ctxt->decode;
1869
1870 switch (c->modrm_reg) {
1871 case 0: /* inc */
1872 emulate_1op("inc", c->dst, ctxt->eflags);
1873 break;
1874 case 1: /* dec */
1875 emulate_1op("dec", c->dst, ctxt->eflags);
1876 break;
1877 case 2: /* call near abs */ {
1878 long int old_eip;
1879 old_eip = c->eip;
1880 c->eip = c->src.val;
1881 c->src.val = old_eip;
1882 emulate_push(ctxt, ops);
1883 break;
1884 }
1885 case 4: /* jmp abs */
1886 c->eip = c->src.val;
1887 break;
1888 case 6: /* push */
1889 emulate_push(ctxt, ops);
1890 break;
1891 }
1892 return X86EMUL_CONTINUE;
1893}
1894
1895static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1896 struct x86_emulate_ops *ops)
1897{
1898 struct decode_cache *c = &ctxt->decode;
1899 u64 old = c->dst.orig_val64;
1900
1901 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1902 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1903 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1904 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1905 ctxt->eflags &= ~EFLG_ZF;
1906 } else {
1907 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1908 (u32) c->regs[VCPU_REGS_RBX];
1909
1910 ctxt->eflags |= EFLG_ZF;
1911 }
1912 return X86EMUL_CONTINUE;
1913}
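/*
 * This is cmpxchg8b: compare EDX:EAX against the 64-bit destination; on a
 * match store ECX:EBX there and set ZF, otherwise load the old value into
 * EDX:EAX and clear ZF.  The store itself is performed later by
 * writeback(), atomically via ops->cmpxchg_emulated() when a lock prefix
 * is present.
 */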
1914
1915static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1916 struct x86_emulate_ops *ops)
1917{
1918 struct decode_cache *c = &ctxt->decode;
1919 int rc;
1920 unsigned long cs;
1921
1922 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
1923 if (rc != X86EMUL_CONTINUE)
1924 return rc;
1925 if (c->op_bytes == 4)
1926 c->eip = (u32)c->eip;
1927 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1928 if (rc != X86EMUL_CONTINUE)
1929 return rc;
1930 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1931 return rc;
1932}
1933
1934static inline void
1935setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1936 struct x86_emulate_ops *ops, struct desc_struct *cs,
1937 struct desc_struct *ss)
1938{
1939 memset(cs, 0, sizeof(struct desc_struct));
1940 ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
1941 memset(ss, 0, sizeof(struct desc_struct));
1942
1943 cs->l = 0; /* will be adjusted later */
1944 set_desc_base(cs, 0); /* flat segment */
1945 cs->g = 1; /* 4kb granularity */
1946 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1947 cs->type = 0x0b; /* Read, Execute, Accessed */
1948 cs->s = 1;
1949 cs->dpl = 0; /* will be adjusted later */
1950 cs->p = 1;
1951 cs->d = 1;
1952
1953 set_desc_base(ss, 0); /* flat segment */
1954 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1955 ss->g = 1; /* 4kb granularity */
1956 ss->s = 1;
1957 ss->type = 0x03; /* Read/Write, Accessed */
1958 ss->d = 1; /* 32bit stack segment */
1959 ss->dpl = 0;
1960 ss->p = 1;
1961}
1962
1963static int
1964emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1965{
1966 struct decode_cache *c = &ctxt->decode;
1967 struct desc_struct cs, ss;
1968 u64 msr_data;
1969 u16 cs_sel, ss_sel;
1970
1971 /* syscall is not available in real mode */
1972 if (ctxt->mode == X86EMUL_MODE_REAL ||
1973 ctxt->mode == X86EMUL_MODE_VM86) {
1974 emulate_ud(ctxt);
1975 return X86EMUL_PROPAGATE_FAULT;
1976 }
1977
1978 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1979
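	/*
	 * MSR_STAR[47:32] holds the SYSCALL CS selector; the SS selector
	 * is architecturally defined as that value + 8.
	 */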
1980 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1981 msr_data >>= 32;
1982 cs_sel = (u16)(msr_data & 0xfffc);
1983 ss_sel = (u16)(msr_data + 8);
1984
1985 if (is_long_mode(ctxt->vcpu)) {
1986 cs.d = 0;
1987 cs.l = 1;
1988 }
1989 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1990 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1991 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1992 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1993
1994 c->regs[VCPU_REGS_RCX] = c->eip;
1995 if (is_long_mode(ctxt->vcpu)) {
1996#ifdef CONFIG_X86_64
1997 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1998
1999 ops->get_msr(ctxt->vcpu,
2000 ctxt->mode == X86EMUL_MODE_PROT64 ?
2001 MSR_LSTAR : MSR_CSTAR, &msr_data);
2002 c->eip = msr_data;
2003
2004 ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
2005 ctxt->eflags &= ~(msr_data | EFLG_RF);
2006#endif
2007 } else {
2008 /* legacy mode */
2009 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
2010 c->eip = (u32)msr_data;
2011
2012 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2013 }
2014
2015 return X86EMUL_CONTINUE;
2016}
2017
2018static int
2019emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2020{
2021 struct decode_cache *c = &ctxt->decode;
2022 struct desc_struct cs, ss;
2023 u64 msr_data;
2024 u16 cs_sel, ss_sel;
2025
2026 /* inject #GP if in real mode */
2027 if (ctxt->mode == X86EMUL_MODE_REAL) {
2028 emulate_gp(ctxt, 0);
2029 return X86EMUL_PROPAGATE_FAULT;
2030 }
2031
2032	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
2033	 * Therefore, we inject an #UD.
2034 */
2035 if (ctxt->mode == X86EMUL_MODE_PROT64) {
2036 emulate_ud(ctxt);
2037 return X86EMUL_PROPAGATE_FAULT;
2038 }
2039
2040 setup_syscalls_segments(ctxt, ops, &cs, &ss);
2041
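	/* sysenter #GPs if MSR_IA32_SYSENTER_CS holds a null selector */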
2042 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
2043 switch (ctxt->mode) {
2044 case X86EMUL_MODE_PROT32:
2045 if ((msr_data & 0xfffc) == 0x0) {
2046 emulate_gp(ctxt, 0);
2047 return X86EMUL_PROPAGATE_FAULT;
2048 }
2049 break;
2050 case X86EMUL_MODE_PROT64:
2051 if (msr_data == 0x0) {
2052 emulate_gp(ctxt, 0);
2053 return X86EMUL_PROPAGATE_FAULT;
2054 }
2055 break;
2056 }
2057
2058 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2059 cs_sel = (u16)msr_data;
2060 cs_sel &= ~SELECTOR_RPL_MASK;
2061 ss_sel = cs_sel + 8;
2062 ss_sel &= ~SELECTOR_RPL_MASK;
2063 if (ctxt->mode == X86EMUL_MODE_PROT64
2064 || is_long_mode(ctxt->vcpu)) {
2065 cs.d = 0;
2066 cs.l = 1;
2067 }
2068
2069 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
2070 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
2071 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
2072 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
2073
2074 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
2075 c->eip = msr_data;
2076
2077 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
2078 c->regs[VCPU_REGS_RSP] = msr_data;
2079
2080 return X86EMUL_CONTINUE;
2081}
2082
2083static int
2084emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2085{
2086 struct decode_cache *c = &ctxt->decode;
2087 struct desc_struct cs, ss;
2088 u64 msr_data;
2089 int usermode;
2090 u16 cs_sel, ss_sel;
2091
2092 /* inject #GP if in real mode or Virtual 8086 mode */
2093 if (ctxt->mode == X86EMUL_MODE_REAL ||
2094 ctxt->mode == X86EMUL_MODE_VM86) {
2095 emulate_gp(ctxt, 0);
2096 return X86EMUL_PROPAGATE_FAULT;
2097 }
2098
2099 setup_syscalls_segments(ctxt, ops, &cs, &ss);
2100
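	/* REX.W (bit 3 of the REX prefix) selects a return to 64-bit mode */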
2101 if ((c->rex_prefix & 0x8) != 0x0)
2102 usermode = X86EMUL_MODE_PROT64;
2103 else
2104 usermode = X86EMUL_MODE_PROT32;
2105
2106 cs.dpl = 3;
2107 ss.dpl = 3;
2108 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
2109 switch (usermode) {
2110 case X86EMUL_MODE_PROT32:
2111 cs_sel = (u16)(msr_data + 16);
2112 if ((msr_data & 0xfffc) == 0x0) {
2113 emulate_gp(ctxt, 0);
2114 return X86EMUL_PROPAGATE_FAULT;
2115 }
2116 ss_sel = (u16)(msr_data + 24);
2117 break;
2118 case X86EMUL_MODE_PROT64:
2119 cs_sel = (u16)(msr_data + 32);
2120 if (msr_data == 0x0) {
2121 emulate_gp(ctxt, 0);
2122 return X86EMUL_PROPAGATE_FAULT;
2123 }
2124 ss_sel = cs_sel + 8;
2125 cs.d = 0;
2126 cs.l = 1;
2127 break;
2128 }
2129 cs_sel |= SELECTOR_RPL_MASK;
2130 ss_sel |= SELECTOR_RPL_MASK;
2131
2132 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
2133 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
2134 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
2135 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
2136
2137 c->eip = c->regs[VCPU_REGS_RDX];
2138 c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
2139
2140 return X86EMUL_CONTINUE;
2141}
2142
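/*
 * I/O-sensitive instructions #GP in protected mode when CPL > IOPL;
 * in VM86 mode they must always consult the TSS I/O bitmap, and in
 * real mode no check is needed at all.
 */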
2143static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
2144 struct x86_emulate_ops *ops)
2145{
2146 int iopl;
2147 if (ctxt->mode == X86EMUL_MODE_REAL)
2148 return false;
2149 if (ctxt->mode == X86EMUL_MODE_VM86)
2150 return true;
2151 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2152 return ops->cpl(ctxt->vcpu) > iopl;
2153}
2154
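/*
 * Consult the I/O permission bitmap in the TSS: the word at offset 102
 * gives the bitmap's offset within the TSS, and a set bit denies access
 * to the corresponding port.  Access is allowed only if every bit
 * covering the <len>-byte range is clear.
 */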
2155static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2156 struct x86_emulate_ops *ops,
2157 u16 port, u16 len)
2158{
2159 struct desc_struct tr_seg;
2160 int r;
2161 u16 io_bitmap_ptr;
2162 u8 perm, bit_idx = port & 0x7;
2163 unsigned mask = (1 << len) - 1;
2164
2165 ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
2166 if (!tr_seg.p)
2167 return false;
2168 if (desc_limit_scaled(&tr_seg) < 103)
2169 return false;
2170 r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
2171 ctxt->vcpu, NULL);
2172 if (r != X86EMUL_CONTINUE)
2173 return false;
2174 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2175 return false;
2176 r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
2177 &perm, 1, ctxt->vcpu, NULL);
2178 if (r != X86EMUL_CONTINUE)
2179 return false;
2180 if ((perm >> bit_idx) & mask)
2181 return false;
2182 return true;
2183}
2184
2185static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2186 struct x86_emulate_ops *ops,
2187 u16 port, u16 len)
2188{
2189 if (emulator_bad_iopl(ctxt, ops))
2190 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
2191 return false;
2192 return true;
2193}
2194
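/*
 * Hardware task switch emulation: the outgoing context is written to
 * the old TSS and the incoming context is read from the new one, in
 * either the 16-bit or the 32-bit TSS format.
 */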
2195static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2196 struct x86_emulate_ops *ops,
2197 struct tss_segment_16 *tss)
2198{
2199 struct decode_cache *c = &ctxt->decode;
2200
2201 tss->ip = c->eip;
2202 tss->flag = ctxt->eflags;
2203 tss->ax = c->regs[VCPU_REGS_RAX];
2204 tss->cx = c->regs[VCPU_REGS_RCX];
2205 tss->dx = c->regs[VCPU_REGS_RDX];
2206 tss->bx = c->regs[VCPU_REGS_RBX];
2207 tss->sp = c->regs[VCPU_REGS_RSP];
2208 tss->bp = c->regs[VCPU_REGS_RBP];
2209 tss->si = c->regs[VCPU_REGS_RSI];
2210 tss->di = c->regs[VCPU_REGS_RDI];
2211
2212 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
2213 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2214 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
2215 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
2216 tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
2217}
2218
2219static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2220 struct x86_emulate_ops *ops,
2221 struct tss_segment_16 *tss)
2222{
2223 struct decode_cache *c = &ctxt->decode;
2224 int ret;
2225
2226 c->eip = tss->ip;
2227 ctxt->eflags = tss->flag | 2;
2228 c->regs[VCPU_REGS_RAX] = tss->ax;
2229 c->regs[VCPU_REGS_RCX] = tss->cx;
2230 c->regs[VCPU_REGS_RDX] = tss->dx;
2231 c->regs[VCPU_REGS_RBX] = tss->bx;
2232 c->regs[VCPU_REGS_RSP] = tss->sp;
2233 c->regs[VCPU_REGS_RBP] = tss->bp;
2234 c->regs[VCPU_REGS_RSI] = tss->si;
2235 c->regs[VCPU_REGS_RDI] = tss->di;
2236
2237 /*
2238 * SDM says that segment selectors are loaded before segment
2239 * descriptors
2240 */
2241 ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
2242 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
2243 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
2244 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
2245 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
2246
2247 /*
2248	 * Now load the segment descriptors.  If a fault happens at this
2249	 * stage, it is handled in the context of the new task.
2250 */
2251 ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
2252 if (ret != X86EMUL_CONTINUE)
2253 return ret;
2254 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2255 if (ret != X86EMUL_CONTINUE)
2256 return ret;
2257 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2258 if (ret != X86EMUL_CONTINUE)
2259 return ret;
2260 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2261 if (ret != X86EMUL_CONTINUE)
2262 return ret;
2263 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2264 if (ret != X86EMUL_CONTINUE)
2265 return ret;
2266
2267 return X86EMUL_CONTINUE;
2268}
2269
2270static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2271 struct x86_emulate_ops *ops,
2272 u16 tss_selector, u16 old_tss_sel,
2273 ulong old_tss_base, struct desc_struct *new_desc)
2274{
2275 struct tss_segment_16 tss_seg;
2276 int ret;
2277 u32 err, new_tss_base = get_desc_base(new_desc);
2278
2279 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2280 &err);
2281 if (ret == X86EMUL_PROPAGATE_FAULT) {
2282 /* FIXME: need to provide precise fault address */
2283 emulate_pf(ctxt, old_tss_base, err);
2284 return ret;
2285 }
2286
2287 save_state_to_tss16(ctxt, ops, &tss_seg);
2288
2289 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2290 &err);
2291 if (ret == X86EMUL_PROPAGATE_FAULT) {
2292 /* FIXME: need to provide precise fault address */
2293 emulate_pf(ctxt, old_tss_base, err);
2294 return ret;
2295 }
2296
2297 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2298 &err);
2299 if (ret == X86EMUL_PROPAGATE_FAULT) {
2300 /* FIXME: need to provide precise fault address */
2301 emulate_pf(ctxt, new_tss_base, err);
2302 return ret;
2303 }
2304
2305 if (old_tss_sel != 0xffff) {
2306 tss_seg.prev_task_link = old_tss_sel;
2307
2308 ret = ops->write_std(new_tss_base,
2309 &tss_seg.prev_task_link,
2310 sizeof tss_seg.prev_task_link,
2311 ctxt->vcpu, &err);
2312 if (ret == X86EMUL_PROPAGATE_FAULT) {
2313 /* FIXME: need to provide precise fault address */
2314 emulate_pf(ctxt, new_tss_base, err);
2315 return ret;
2316 }
2317 }
2318
2319 return load_state_from_tss16(ctxt, ops, &tss_seg);
2320}
2321
2322static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2323 struct x86_emulate_ops *ops,
2324 struct tss_segment_32 *tss)
2325{
2326 struct decode_cache *c = &ctxt->decode;
2327
2328 tss->cr3 = ops->get_cr(3, ctxt->vcpu);
2329 tss->eip = c->eip;
2330 tss->eflags = ctxt->eflags;
2331 tss->eax = c->regs[VCPU_REGS_RAX];
2332 tss->ecx = c->regs[VCPU_REGS_RCX];
2333 tss->edx = c->regs[VCPU_REGS_RDX];
2334 tss->ebx = c->regs[VCPU_REGS_RBX];
2335 tss->esp = c->regs[VCPU_REGS_RSP];
2336 tss->ebp = c->regs[VCPU_REGS_RBP];
2337 tss->esi = c->regs[VCPU_REGS_RSI];
2338 tss->edi = c->regs[VCPU_REGS_RDI];
2339
2340 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
2341 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2342 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
2343 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
2344 tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
2345 tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
2346 tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
2347}
2348
2349static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2350 struct x86_emulate_ops *ops,
2351 struct tss_segment_32 *tss)
2352{
2353 struct decode_cache *c = &ctxt->decode;
2354 int ret;
2355
2356 if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
2357 emulate_gp(ctxt, 0);
2358 return X86EMUL_PROPAGATE_FAULT;
2359 }
2360 c->eip = tss->eip;
2361 ctxt->eflags = tss->eflags | 2;
2362 c->regs[VCPU_REGS_RAX] = tss->eax;
2363 c->regs[VCPU_REGS_RCX] = tss->ecx;
2364 c->regs[VCPU_REGS_RDX] = tss->edx;
2365 c->regs[VCPU_REGS_RBX] = tss->ebx;
2366 c->regs[VCPU_REGS_RSP] = tss->esp;
2367 c->regs[VCPU_REGS_RBP] = tss->ebp;
2368 c->regs[VCPU_REGS_RSI] = tss->esi;
2369 c->regs[VCPU_REGS_RDI] = tss->edi;
2370
2371 /*
2372 * SDM says that segment selectors are loaded before segment
2373 * descriptors
2374 */
2375 ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
2376 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
2377 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
2378 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
2379 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
2380 ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
2381 ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
2382
2383 /*
2384	 * Now load the segment descriptors.  If a fault happens at this
2385	 * stage, it is handled in the context of the new task.
2386 */
2387 ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
2388 if (ret != X86EMUL_CONTINUE)
2389 return ret;
2390 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2391 if (ret != X86EMUL_CONTINUE)
2392 return ret;
2393 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2394 if (ret != X86EMUL_CONTINUE)
2395 return ret;
2396 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2397 if (ret != X86EMUL_CONTINUE)
2398 return ret;
2399 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2400 if (ret != X86EMUL_CONTINUE)
2401 return ret;
2402 ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
2403 if (ret != X86EMUL_CONTINUE)
2404 return ret;
2405 ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
2406 if (ret != X86EMUL_CONTINUE)
2407 return ret;
2408
2409 return X86EMUL_CONTINUE;
2410}
2411
2412static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2413 struct x86_emulate_ops *ops,
2414 u16 tss_selector, u16 old_tss_sel,
2415 ulong old_tss_base, struct desc_struct *new_desc)
2416{
2417 struct tss_segment_32 tss_seg;
2418 int ret;
2419 u32 err, new_tss_base = get_desc_base(new_desc);
2420
2421 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2422 &err);
2423 if (ret == X86EMUL_PROPAGATE_FAULT) {
2424 /* FIXME: need to provide precise fault address */
2425 emulate_pf(ctxt, old_tss_base, err);
2426 return ret;
2427 }
2428
2429 save_state_to_tss32(ctxt, ops, &tss_seg);
2430
2431 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2432 &err);
2433 if (ret == X86EMUL_PROPAGATE_FAULT) {
2434 /* FIXME: need to provide precise fault address */
2435 emulate_pf(ctxt, old_tss_base, err);
2436 return ret;
2437 }
2438
2439 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2440 &err);
2441 if (ret == X86EMUL_PROPAGATE_FAULT) {
2442 /* FIXME: need to provide precise fault address */
2443 emulate_pf(ctxt, new_tss_base, err);
2444 return ret;
2445 }
2446
2447 if (old_tss_sel != 0xffff) {
2448 tss_seg.prev_task_link = old_tss_sel;
2449
2450 ret = ops->write_std(new_tss_base,
2451 &tss_seg.prev_task_link,
2452 sizeof tss_seg.prev_task_link,
2453 ctxt->vcpu, &err);
2454 if (ret == X86EMUL_PROPAGATE_FAULT) {
2455 /* FIXME: need to provide precise fault address */
2456 emulate_pf(ctxt, new_tss_base, err);
2457 return ret;
2458 }
2459 }
2460
2461 return load_state_from_tss32(ctxt, ops, &tss_seg);
2462}
2463
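/*
 * Core of the task switch (SDM vol. 3, "Task Switching"): check the
 * new TSS descriptor, save and load the task state, and maintain the
 * busy bit, EFLAGS.NT and the back link according to the reason for
 * the switch (jmp, call, iret, or gate).
 */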
2464static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2465 struct x86_emulate_ops *ops,
2466 u16 tss_selector, int reason,
2467 bool has_error_code, u32 error_code)
2468{
2469 struct desc_struct curr_tss_desc, next_tss_desc;
2470 int ret;
2471 u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
2472 ulong old_tss_base =
2473 ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
2474 u32 desc_limit;
2475
2476 /* FIXME: old_tss_base == ~0 ? */
2477
2478 ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2479 if (ret != X86EMUL_CONTINUE)
2480 return ret;
2481 ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2482 if (ret != X86EMUL_CONTINUE)
2483 return ret;
2484
2485 /* FIXME: check that next_tss_desc is tss */
2486
2487 if (reason != TASK_SWITCH_IRET) {
2488 if ((tss_selector & 3) > next_tss_desc.dpl ||
2489 ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
2490 emulate_gp(ctxt, 0);
2491 return X86EMUL_PROPAGATE_FAULT;
2492 }
2493 }
2494
2495 desc_limit = desc_limit_scaled(&next_tss_desc);
2496 if (!next_tss_desc.p ||
2497 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2498 desc_limit < 0x2b)) {
2499 emulate_ts(ctxt, tss_selector & 0xfffc);
2500 return X86EMUL_PROPAGATE_FAULT;
2501 }
2502
2503 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2504 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2505 write_segment_descriptor(ctxt, ops, old_tss_sel,
2506 &curr_tss_desc);
2507 }
2508
2509 if (reason == TASK_SWITCH_IRET)
2510 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2511
2512	/* Set the back link to the previous task only if the NT bit is set
2513	   in eflags; note that old_tss_sel is not used after this point. */
2514 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2515 old_tss_sel = 0xffff;
2516
2517 if (next_tss_desc.type & 8)
2518 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2519 old_tss_base, &next_tss_desc);
2520 else
2521 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2522 old_tss_base, &next_tss_desc);
2523 if (ret != X86EMUL_CONTINUE)
2524 return ret;
2525
2526 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2527 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2528
2529 if (reason != TASK_SWITCH_IRET) {
2530 next_tss_desc.type |= (1 << 1); /* set busy flag */
2531 write_segment_descriptor(ctxt, ops, tss_selector,
2532 &next_tss_desc);
2533 }
2534
2535 ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2536 ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
2537 ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2538
2539 if (has_error_code) {
2540 struct decode_cache *c = &ctxt->decode;
2541
2542 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2543 c->lock_prefix = 0;
2544 c->src.val = (unsigned long) error_code;
2545 emulate_push(ctxt, ops);
2546 }
2547
2548 return ret;
2549}
2550
2551int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2552 struct x86_emulate_ops *ops,
2553 u16 tss_selector, int reason,
2554 bool has_error_code, u32 error_code)
2555{
2556 struct decode_cache *c = &ctxt->decode;
2557 int rc;
2558
2559 c->eip = ctxt->eip;
2560 c->dst.type = OP_NONE;
2561
2562 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2563 has_error_code, error_code);
2564
2565 if (rc == X86EMUL_CONTINUE) {
2566 rc = writeback(ctxt, ops);
2567 if (rc == X86EMUL_CONTINUE)
2568 ctxt->eip = c->eip;
2569 }
2570
2571 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2572}
2573
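/*
 * Step a string operand's index register: EFLAGS.DF selects the
 * direction and the operand size gives the stride.
 */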
2574static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
2575 int reg, struct operand *op)
2576{
2577 struct decode_cache *c = &ctxt->decode;
2578 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2579
2580 register_address_increment(c, &c->regs[reg], df * op->bytes);
2581 op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
2582}
2583
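/*
 * Execute one previously decoded instruction: check prefixes and
 * privilege, fetch memory operands, dispatch on the opcode, then
 * write back the result and advance eip.
 */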
2584int
2585x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2586{
2587 u64 msr_data;
2588 struct decode_cache *c = &ctxt->decode;
2589 int rc = X86EMUL_CONTINUE;
2590 int saved_dst_type = c->dst.type;
2591
2592 ctxt->decode.mem_read.pos = 0;
2593
2594 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
2595 emulate_ud(ctxt);
2596 goto done;
2597 }
2598
2599 /* LOCK prefix is allowed only with some instructions */
2600 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
2601 emulate_ud(ctxt);
2602 goto done;
2603 }
2604
2605 /* Privileged instruction can be executed only in CPL=0 */
2606 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
2607 emulate_gp(ctxt, 0);
2608 goto done;
2609 }
2610
2611 if (c->rep_prefix && (c->d & String)) {
2612 ctxt->restart = true;
2613 /* All REP prefixes have the same first termination condition */
2614 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
2615 string_done:
2616 ctxt->restart = false;
2617 ctxt->eip = c->eip;
2618 goto done;
2619 }
2620		/* The second termination condition only applies to REPE
2621		 * and REPNE.  If the repeat string operation prefix is
2622		 * REPE/REPZ or REPNE/REPNZ, test the corresponding
2623		 * termination condition:
2624 * - if REPE/REPZ and ZF = 0 then done
2625 * - if REPNE/REPNZ and ZF = 1 then done
2626 */
2627 if ((c->b == 0xa6) || (c->b == 0xa7) ||
2628 (c->b == 0xae) || (c->b == 0xaf)) {
2629 if ((c->rep_prefix == REPE_PREFIX) &&
2630 ((ctxt->eflags & EFLG_ZF) == 0))
2631 goto string_done;
2632 if ((c->rep_prefix == REPNE_PREFIX) &&
2633 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
2634 goto string_done;
2635 }
2636 c->eip = ctxt->eip;
2637 }
2638
2639 if (c->src.type == OP_MEM) {
2640 rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
2641 c->src.valptr, c->src.bytes);
2642 if (rc != X86EMUL_CONTINUE)
2643 goto done;
2644 c->src.orig_val64 = c->src.val64;
2645 }
2646
2647 if (c->src2.type == OP_MEM) {
2648 rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
2649 &c->src2.val, c->src2.bytes);
2650 if (rc != X86EMUL_CONTINUE)
2651 goto done;
2652 }
2653
2654 if ((c->d & DstMask) == ImplicitOps)
2655 goto special_insn;
2656
2658 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
2659 /* optimisation - avoid slow emulated read if Mov */
2660 rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
2661 &c->dst.val, c->dst.bytes);
2662 if (rc != X86EMUL_CONTINUE)
2663 goto done;
2664 }
2665 c->dst.orig_val = c->dst.val;
2666
2667special_insn:
2668
2669 if (c->twobyte)
2670 goto twobyte_insn;
2671
2672 switch (c->b) {
2673 case 0x00 ... 0x05:
2674 add: /* add */
2675 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
2676 break;
2677 case 0x06: /* push es */
2678 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
2679 break;
2680 case 0x07: /* pop es */
2681 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
2682 if (rc != X86EMUL_CONTINUE)
2683 goto done;
2684 break;
2685 case 0x08 ... 0x0d:
2686 or: /* or */
2687 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2688 break;
2689 case 0x0e: /* push cs */
2690 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
2691 break;
2692 case 0x10 ... 0x15:
2693 adc: /* adc */
2694 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
2695 break;
2696 case 0x16: /* push ss */
2697 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
2698 break;
2699 case 0x17: /* pop ss */
2700 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
2701 if (rc != X86EMUL_CONTINUE)
2702 goto done;
2703 break;
2704 case 0x18 ... 0x1d:
2705 sbb: /* sbb */
2706 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
2707 break;
2708 case 0x1e: /* push ds */
2709 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
2710 break;
2711 case 0x1f: /* pop ds */
2712 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
2713 if (rc != X86EMUL_CONTINUE)
2714 goto done;
2715 break;
2716 case 0x20 ... 0x25:
2717 and: /* and */
2718 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
2719 break;
2720 case 0x28 ... 0x2d:
2721 sub: /* sub */
2722 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
2723 break;
2724 case 0x30 ... 0x35:
2725 xor: /* xor */
2726 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
2727 break;
2728 case 0x38 ... 0x3d:
2729 cmp: /* cmp */
2730 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
2731 break;
2732 case 0x40 ... 0x47: /* inc r16/r32 */
2733 emulate_1op("inc", c->dst, ctxt->eflags);
2734 break;
2735 case 0x48 ... 0x4f: /* dec r16/r32 */
2736 emulate_1op("dec", c->dst, ctxt->eflags);
2737 break;
2738 case 0x50 ... 0x57: /* push reg */
2739 emulate_push(ctxt, ops);
2740 break;
2741 case 0x58 ... 0x5f: /* pop reg */
2742 pop_instruction:
2743 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
2744 if (rc != X86EMUL_CONTINUE)
2745 goto done;
2746 break;
2747 case 0x60: /* pusha */
2748 rc = emulate_pusha(ctxt, ops);
2749 if (rc != X86EMUL_CONTINUE)
2750 goto done;
2751 break;
2752 case 0x61: /* popa */
2753 rc = emulate_popa(ctxt, ops);
2754 if (rc != X86EMUL_CONTINUE)
2755 goto done;
2756 break;
2757 case 0x63: /* movsxd */
2758 if (ctxt->mode != X86EMUL_MODE_PROT64)
2759 goto cannot_emulate;
2760 c->dst.val = (s32) c->src.val;
2761 break;
2762 case 0x68: /* push imm */
2763 case 0x6a: /* push imm8 */
2764 emulate_push(ctxt, ops);
2765 break;
2766 case 0x6c: /* insb */
2767 case 0x6d: /* insw/insd */
2768 c->dst.bytes = min(c->dst.bytes, 4u);
2769 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2770 c->dst.bytes)) {
2771 emulate_gp(ctxt, 0);
2772 goto done;
2773 }
2774 if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
2775 c->regs[VCPU_REGS_RDX], &c->dst.val))
2776 goto done; /* IO is needed, skip writeback */
2777 break;
2778 case 0x6e: /* outsb */
2779 case 0x6f: /* outsw/outsd */
2780 c->src.bytes = min(c->src.bytes, 4u);
2781 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2782 c->src.bytes)) {
2783 emulate_gp(ctxt, 0);
2784 goto done;
2785 }
2786 ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
2787 &c->src.val, 1, ctxt->vcpu);
2788
2789 c->dst.type = OP_NONE; /* nothing to writeback */
2790 break;
2791 case 0x70 ... 0x7f: /* jcc (short) */
2792 if (test_cc(c->b, ctxt->eflags))
2793 jmp_rel(c, c->src.val);
2794 break;
2795 case 0x80 ... 0x83: /* Grp1 */
2796 switch (c->modrm_reg) {
2797 case 0:
2798 goto add;
2799 case 1:
2800 goto or;
2801 case 2:
2802 goto adc;
2803 case 3:
2804 goto sbb;
2805 case 4:
2806 goto and;
2807 case 5:
2808 goto sub;
2809 case 6:
2810 goto xor;
2811 case 7:
2812 goto cmp;
2813 }
2814 break;
2815 case 0x84 ... 0x85:
2816 test:
2817 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
2818 break;
2819 case 0x86 ... 0x87: /* xchg */
2820 xchg:
2821 /* Write back the register source. */
2822 switch (c->dst.bytes) {
2823 case 1:
2824 *(u8 *) c->src.ptr = (u8) c->dst.val;
2825 break;
2826 case 2:
2827 *(u16 *) c->src.ptr = (u16) c->dst.val;
2828 break;
2829 case 4:
2830 *c->src.ptr = (u32) c->dst.val;
2831 break; /* 64b reg: zero-extend */
2832 case 8:
2833 *c->src.ptr = c->dst.val;
2834 break;
2835 }
2836 /*
2837 * Write back the memory destination with implicit LOCK
2838 * prefix.
2839 */
2840 c->dst.val = c->src.val;
2841 c->lock_prefix = 1;
2842 break;
2843 case 0x88 ... 0x8b: /* mov */
2844 goto mov;
2845 case 0x8c: /* mov r/m, sreg */
2846 if (c->modrm_reg > VCPU_SREG_GS) {
2847 emulate_ud(ctxt);
2848 goto done;
2849 }
2850 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
2851 break;
2852 case 0x8d: /* lea r16/r32, m */
2853 c->dst.val = c->modrm_ea;
2854 break;
2855 case 0x8e: { /* mov seg, r/m16 */
2856 uint16_t sel;
2857
2858 sel = c->src.val;
2859
2860 if (c->modrm_reg == VCPU_SREG_CS ||
2861 c->modrm_reg > VCPU_SREG_GS) {
2862 emulate_ud(ctxt);
2863 goto done;
2864 }
2865
2866 if (c->modrm_reg == VCPU_SREG_SS)
2867 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2868
2869 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
2870
2871 c->dst.type = OP_NONE; /* Disable writeback. */
2872 break;
2873 }
2874 case 0x8f: /* pop (sole member of Grp1a) */
2875 rc = emulate_grp1a(ctxt, ops);
2876 if (rc != X86EMUL_CONTINUE)
2877 goto done;
2878 break;
2879 case 0x90: /* nop / xchg r8,rax */
2880 if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) {
2881 c->dst.type = OP_NONE; /* nop */
2882 break;
2883 }
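		/* fall through - xchg r8,rax (REX.B) uses the common path */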
2884 case 0x91 ... 0x97: /* xchg reg,rax */
2885 c->src.type = OP_REG;
2886 c->src.bytes = c->op_bytes;
2887 c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
2888 c->src.val = *(c->src.ptr);
2889 goto xchg;
2890 case 0x9c: /* pushf */
2891 c->src.val = (unsigned long) ctxt->eflags;
2892 emulate_push(ctxt, ops);
2893 break;
2894 case 0x9d: /* popf */
2895 c->dst.type = OP_REG;
2896 c->dst.ptr = (unsigned long *) &ctxt->eflags;
2897 c->dst.bytes = c->op_bytes;
2898 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
2899 if (rc != X86EMUL_CONTINUE)
2900 goto done;
2901 break;
2902 case 0xa0 ... 0xa3: /* mov */
2903 case 0xa4 ... 0xa5: /* movs */
2904 goto mov;
2905 case 0xa6 ... 0xa7: /* cmps */
2906 c->dst.type = OP_NONE; /* Disable writeback. */
2907 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
2908 goto cmp;
2909 case 0xa8 ... 0xa9: /* test ax, imm */
2910 goto test;
2911 case 0xaa ... 0xab: /* stos */
2912 c->dst.val = c->regs[VCPU_REGS_RAX];
2913 break;
2914 case 0xac ... 0xad: /* lods */
2915 goto mov;
2916 case 0xae ... 0xaf: /* scas */
2917 DPRINTF("Urk! I don't handle SCAS.\n");
2918 goto cannot_emulate;
2919 case 0xb0 ... 0xbf: /* mov r, imm */
2920 goto mov;
2921 case 0xc0 ... 0xc1:
2922 emulate_grp2(ctxt);
2923 break;
2924 case 0xc3: /* ret */
2925 c->dst.type = OP_REG;
2926 c->dst.ptr = &c->eip;
2927 c->dst.bytes = c->op_bytes;
2928 goto pop_instruction;
2929 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
2930 mov:
2931 c->dst.val = c->src.val;
2932 break;
2933 case 0xcb: /* ret far */
2934 rc = emulate_ret_far(ctxt, ops);
2935 if (rc != X86EMUL_CONTINUE)
2936 goto done;
2937 break;
2938 case 0xcf: /* iret */
2939 rc = emulate_iret(ctxt, ops);
2940
2941 if (rc != X86EMUL_CONTINUE)
2942 goto done;
2943 break;
2944 case 0xd0 ... 0xd1: /* Grp2 */
2945 c->src.val = 1;
2946 emulate_grp2(ctxt);
2947 break;
2948 case 0xd2 ... 0xd3: /* Grp2 */
2949 c->src.val = c->regs[VCPU_REGS_RCX];
2950 emulate_grp2(ctxt);
2951 break;
2952 case 0xe4: /* inb */
2953 case 0xe5: /* in */
2954 goto do_io_in;
2955 case 0xe6: /* outb */
2956 case 0xe7: /* out */
2957 goto do_io_out;
2958 case 0xe8: /* call (near) */ {
2959 long int rel = c->src.val;
2960 c->src.val = (unsigned long) c->eip;
2961 jmp_rel(c, rel);
2962 emulate_push(ctxt, ops);
2963 break;
2964 }
2965 case 0xe9: /* jmp rel */
2966 goto jmp;
2967 case 0xea: { /* jmp far */
2968 unsigned short sel;
2969 jump_far:
2970 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2971
2972 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
2973 goto done;
2974
2975 c->eip = 0;
2976 memcpy(&c->eip, c->src.valptr, c->op_bytes);
2977 break;
2978 }
2979 case 0xeb:
2980 jmp: /* jmp rel short */
2981 jmp_rel(c, c->src.val);
2982 c->dst.type = OP_NONE; /* Disable writeback. */
2983 break;
2984 case 0xec: /* in al,dx */
2985 case 0xed: /* in (e/r)ax,dx */
2986 c->src.val = c->regs[VCPU_REGS_RDX];
2987 do_io_in:
2988 c->dst.bytes = min(c->dst.bytes, 4u);
2989 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
2990 emulate_gp(ctxt, 0);
2991 goto done;
2992 }
2993 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
2994 &c->dst.val))
2995 goto done; /* IO is needed */
2996 break;
2997 case 0xee: /* out dx,al */
2998 case 0xef: /* out dx,(e/r)ax */
2999 c->src.val = c->regs[VCPU_REGS_RDX];
3000 do_io_out:
3001 c->dst.bytes = min(c->dst.bytes, 4u);
3002 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3003 emulate_gp(ctxt, 0);
3004 goto done;
3005 }
3006 ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
3007 ctxt->vcpu);
3008 c->dst.type = OP_NONE; /* Disable writeback. */
3009 break;
3010 case 0xf4: /* hlt */
3011 ctxt->vcpu->arch.halt_request = 1;
3012 break;
3013 case 0xf5: /* cmc */
3014 /* complement carry flag from eflags reg */
3015 ctxt->eflags ^= EFLG_CF;
3016 c->dst.type = OP_NONE; /* Disable writeback. */
3017 break;
3018 case 0xf6 ... 0xf7: /* Grp3 */
3019 if (!emulate_grp3(ctxt, ops))
3020 goto cannot_emulate;
3021 break;
3022 case 0xf8: /* clc */
3023 ctxt->eflags &= ~EFLG_CF;
3024 c->dst.type = OP_NONE; /* Disable writeback. */
3025 break;
3026 case 0xfa: /* cli */
3027 if (emulator_bad_iopl(ctxt, ops)) {
3028 emulate_gp(ctxt, 0);
3029 goto done;
3030 } else {
3031 ctxt->eflags &= ~X86_EFLAGS_IF;
3032 c->dst.type = OP_NONE; /* Disable writeback. */
3033 }
3034 break;
3035 case 0xfb: /* sti */
3036 if (emulator_bad_iopl(ctxt, ops)) {
3037 emulate_gp(ctxt, 0);
3038 goto done;
3039 } else {
3040 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3041 ctxt->eflags |= X86_EFLAGS_IF;
3042 c->dst.type = OP_NONE; /* Disable writeback. */
3043 }
3044 break;
3045 case 0xfc: /* cld */
3046 ctxt->eflags &= ~EFLG_DF;
3047 c->dst.type = OP_NONE; /* Disable writeback. */
3048 break;
3049 case 0xfd: /* std */
3050 ctxt->eflags |= EFLG_DF;
3051 c->dst.type = OP_NONE; /* Disable writeback. */
3052 break;
3053 case 0xfe: /* Grp4 */
3054 grp45:
3055 rc = emulate_grp45(ctxt, ops);
3056 if (rc != X86EMUL_CONTINUE)
3057 goto done;
3058 break;
3059 case 0xff: /* Grp5 */
3060 if (c->modrm_reg == 5)
3061 goto jump_far;
3062 goto grp45;
3063 default:
3064 goto cannot_emulate;
3065 }
3066
3067writeback:
3068 rc = writeback(ctxt, ops);
3069 if (rc != X86EMUL_CONTINUE)
3070 goto done;
3071
3072 /*
3073 * restore dst type in case the decoding will be reused
3074	 * (happens for string instructions)
3075 */
3076 c->dst.type = saved_dst_type;
3077
3078 if ((c->d & SrcMask) == SrcSI)
3079 string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
3080 VCPU_REGS_RSI, &c->src);
3081
3082 if ((c->d & DstMask) == DstDI)
3083 string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
3084 &c->dst);
3085
3086 if (c->rep_prefix && (c->d & String)) {
3087 struct read_cache *rc = &ctxt->decode.io_read;
3088 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3089 /*
3090	 * Re-enter the guest when the pio read-ahead buffer is empty or,
3091	 * if it is not used, after every 1024 iterations.
3092 */
3093 if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
3094 (rc->end != 0 && rc->end == rc->pos))
3095 ctxt->restart = false;
3096 }
3097 /*
3098	 * reset the read cache here in case the string instruction is
3099	 * restarted without re-decoding
3100 */
3101 ctxt->decode.mem_read.end = 0;
3102 ctxt->eip = c->eip;
3103
3104done:
3105 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
3106
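/* Two-byte (0x0f-prefixed) opcodes are dispatched below. */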
3107twobyte_insn:
3108 switch (c->b) {
3109 case 0x01: /* lgdt, lidt, lmsw */
3110 switch (c->modrm_reg) {
3111 u16 size;
3112 unsigned long address;
3113
3114 case 0: /* vmcall */
3115 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3116 goto cannot_emulate;
3117
3118 rc = kvm_fix_hypercall(ctxt->vcpu);
3119 if (rc != X86EMUL_CONTINUE)
3120 goto done;
3121
3122 /* Let the processor re-execute the fixed hypercall */
3123 c->eip = ctxt->eip;
3124 /* Disable writeback. */
3125 c->dst.type = OP_NONE;
3126 break;
3127 case 2: /* lgdt */
3128 rc = read_descriptor(ctxt, ops, c->src.ptr,
3129 &size, &address, c->op_bytes);
3130 if (rc != X86EMUL_CONTINUE)
3131 goto done;
3132 realmode_lgdt(ctxt->vcpu, size, address);
3133 /* Disable writeback. */
3134 c->dst.type = OP_NONE;
3135 break;
3136 case 3: /* lidt/vmmcall */
3137 if (c->modrm_mod == 3) {
3138 switch (c->modrm_rm) {
3139 case 1:
3140 rc = kvm_fix_hypercall(ctxt->vcpu);
3141 if (rc != X86EMUL_CONTINUE)
3142 goto done;
3143 break;
3144 default:
3145 goto cannot_emulate;
3146 }
3147 } else {
3148 rc = read_descriptor(ctxt, ops, c->src.ptr,
3149 &size, &address,
3150 c->op_bytes);
3151 if (rc != X86EMUL_CONTINUE)
3152 goto done;
3153 realmode_lidt(ctxt->vcpu, size, address);
3154 }
3155 /* Disable writeback. */
3156 c->dst.type = OP_NONE;
3157 break;
3158 case 4: /* smsw */
3159 c->dst.bytes = 2;
3160 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3161 break;
3162 case 6: /* lmsw */
3163 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
3164 (c->src.val & 0x0f), ctxt->vcpu);
3165 c->dst.type = OP_NONE;
3166 break;
3167 case 5: /* not defined */
3168 emulate_ud(ctxt);
3169 goto done;
3170 case 7: /* invlpg*/
3171 emulate_invlpg(ctxt->vcpu, c->modrm_ea);
3172 /* Disable writeback. */
3173 c->dst.type = OP_NONE;
3174 break;
3175 default:
3176 goto cannot_emulate;
3177 }
3178 break;
3179 case 0x05: /* syscall */
3180 rc = emulate_syscall(ctxt, ops);
3181 if (rc != X86EMUL_CONTINUE)
3182 goto done;
3183 else
3184 goto writeback;
3185 break;
3186 case 0x06:
3187 emulate_clts(ctxt->vcpu);
3188 c->dst.type = OP_NONE;
3189 break;
3190 case 0x09: /* wbinvd */
3191 kvm_emulate_wbinvd(ctxt->vcpu);
3192 c->dst.type = OP_NONE;
3193 break;
3194 case 0x08: /* invd */
3195 case 0x0d: /* GrpP (prefetch) */
3196 case 0x18: /* Grp16 (prefetch/nop) */
3197 c->dst.type = OP_NONE;
3198 break;
3199 case 0x20: /* mov cr, reg */
3200 switch (c->modrm_reg) {
3201 case 1:
3202 case 5 ... 7:
3203 case 9 ... 15:
3204 emulate_ud(ctxt);
3205 goto done;
3206 }
3207 c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3208 c->dst.type = OP_NONE; /* no writeback */
3209 break;
3210 case 0x21: /* mov from dr to reg */
3211 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3212 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3213 emulate_ud(ctxt);
3214 goto done;
3215 }
3216 ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
3217 c->dst.type = OP_NONE; /* no writeback */
3218 break;
3219 case 0x22: /* mov reg, cr */
3220 if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
3221 emulate_gp(ctxt, 0);
3222 goto done;
3223 }
3224 c->dst.type = OP_NONE;
3225 break;
3226 case 0x23: /* mov from reg to dr */
3227 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3228 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3229 emulate_ud(ctxt);
3230 goto done;
3231 }
3232
3233 if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] &
3234 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3235 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3236 /* #UD condition is already handled by the code above */
3237 emulate_gp(ctxt, 0);
3238 goto done;
3239 }
3240
3241 c->dst.type = OP_NONE; /* no writeback */
3242 break;
3243 case 0x30:
3244 /* wrmsr */
3245 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3246 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3247 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3248 emulate_gp(ctxt, 0);
3249 goto done;
3250 }
3251 rc = X86EMUL_CONTINUE;
3252 c->dst.type = OP_NONE;
3253 break;
3254 case 0x32:
3255 /* rdmsr */
3256 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3257 emulate_gp(ctxt, 0);
3258 goto done;
3259 } else {
3260 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3261 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3262 }
3263 rc = X86EMUL_CONTINUE;
3264 c->dst.type = OP_NONE;
3265 break;
3266 case 0x34: /* sysenter */
3267 rc = emulate_sysenter(ctxt, ops);
3268 if (rc != X86EMUL_CONTINUE)
3269 goto done;
3270 else
3271 goto writeback;
3272 break;
3273 case 0x35: /* sysexit */
3274 rc = emulate_sysexit(ctxt, ops);
3275 if (rc != X86EMUL_CONTINUE)
3276 goto done;
3277 else
3278 goto writeback;
3279 break;
3280 case 0x40 ... 0x4f: /* cmov */
3281 c->dst.val = c->dst.orig_val = c->src.val;
3282 if (!test_cc(c->b, ctxt->eflags))
3283 c->dst.type = OP_NONE; /* no writeback */
3284 break;
3285 case 0x80 ... 0x8f: /* jnz rel, etc*/
3286 if (test_cc(c->b, ctxt->eflags))
3287 jmp_rel(c, c->src.val);
3288 c->dst.type = OP_NONE;
3289 break;
3290 case 0xa0: /* push fs */
3291 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3292 break;
3293 case 0xa1: /* pop fs */
3294 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3295 if (rc != X86EMUL_CONTINUE)
3296 goto done;
3297 break;
3298 case 0xa3:
3299 bt: /* bt */
3300 c->dst.type = OP_NONE;
3301 /* only subword offset */
3302 c->src.val &= (c->dst.bytes << 3) - 1;
3303 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3304 break;
3305 case 0xa4: /* shld imm8, r, r/m */
3306 case 0xa5: /* shld cl, r, r/m */
3307 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3308 break;
3309 case 0xa8: /* push gs */
3310 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3311 break;
3312 case 0xa9: /* pop gs */
3313 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3314 if (rc != X86EMUL_CONTINUE)
3315 goto done;
3316 break;
3317 case 0xab:
3318 bts: /* bts */
3319 /* only subword offset */
3320 c->src.val &= (c->dst.bytes << 3) - 1;
3321 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3322 break;
3323 case 0xac: /* shrd imm8, r, r/m */
3324 case 0xad: /* shrd cl, r, r/m */
3325 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
3326 break;
3327 case 0xae: /* clflush */
3328 break;
3329 case 0xb0 ... 0xb1: /* cmpxchg */
3330 /*
3331 * Save real source value, then compare EAX against
3332 * destination.
3333 */
3334 c->src.orig_val = c->src.val;
3335 c->src.val = c->regs[VCPU_REGS_RAX];
3336 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3337 if (ctxt->eflags & EFLG_ZF) {
3338 /* Success: write back to memory. */
3339 c->dst.val = c->src.orig_val;
3340 } else {
3341 /* Failure: write the value we saw to EAX. */
3342 c->dst.type = OP_REG;
3343 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
3344 }
3345 break;
3346 case 0xb3:
3347 btr: /* btr */
3348 /* only subword offset */
3349 c->src.val &= (c->dst.bytes << 3) - 1;
3350 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3351 break;
3352 case 0xb6 ... 0xb7: /* movzx */
3353 c->dst.bytes = c->op_bytes;
3354 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3355 : (u16) c->src.val;
3356 break;
3357 case 0xba: /* Grp8 */
3358 switch (c->modrm_reg & 3) {
3359 case 0:
3360 goto bt;
3361 case 1:
3362 goto bts;
3363 case 2:
3364 goto btr;
3365 case 3:
3366 goto btc;
3367 }
3368 break;
3369 case 0xbb:
3370 btc: /* btc */
3371 /* only subword offset */
3372 c->src.val &= (c->dst.bytes << 3) - 1;
3373 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3374 break;
3375 case 0xbe ... 0xbf: /* movsx */
3376 c->dst.bytes = c->op_bytes;
3377 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3378 (s16) c->src.val;
3379 break;
3380 case 0xc3: /* movnti */
3381 c->dst.bytes = c->op_bytes;
3382 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3383 (u64) c->src.val;
3384 break;
3385 case 0xc7: /* Grp9 (cmpxchg8b) */
3386 rc = emulate_grp9(ctxt, ops);
3387 if (rc != X86EMUL_CONTINUE)
3388 goto done;
3389 break;
3390 default:
3391 goto cannot_emulate;
3392 }
3393 goto writeback;
3394
3395cannot_emulate:
3396 DPRINTF("Cannot emulate %02x\n", c->b);
3397 return -1;
3398}