 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 * We don't allow single-stepping an mtmsrd that would clear
 * MSR_RI, since that would make the exception unrecoverable.
 * Since we need to single-step to proceed from a breakpoint,
 * we don't allow putting a breakpoint on an mtmsrd instruction.
 * Similarly we don't allow breakpoints on rfid instructions.
 * These macros tell us if an instruction is an mtmsrd or rfid.
 * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
 * and an mtmsrd (64-bit).
/*
 * mtmsr is primary opcode 31, extended opcode 146 (0x124 >> 1); masking
 * out bit 0x40 of the extended opcode lets the same pattern also match
 * mtmsrd (extended opcode 178), so this is true for both forms.
 */
#define IS_MTMSRD(instr)	((((instr) ^ 0x7c000124) & 0xfc0007be) == 0)
/* rfid: primary opcode 19, extended opcode 18 */
#define IS_RFID(instr)		((((instr) ^ 0x4c000024) & 0xfc0007fe) == 0)
/* rfi: primary opcode 19, extended opcode 50 */
#define IS_RFI(instr)		((((instr) ^ 0x4c000064) & 0xfc0007fe) == 0)
26 enum instruction_type {
27 COMPUTE, /* arith/logical/CR op, etc. */
28 LOAD, /* load and store types need to be contiguous */
/* The enum instruction_type value occupies the low 5 bits of the type word */
#define INSTR_TYPE_MASK 0x1f

/*
 * Relies on the LOAD..STCX types being contiguous in enum
 * instruction_type (see the comment on LOAD above).
 * NOTE: evaluates "type" twice - the argument must be side-effect free.
 */
#define OP_IS_LOAD_STORE(type) (LOAD <= (type) && (type) <= STCX)
/* Compute flags, ORed in with type */
/* Branch flags, ORed in with type */
/* Load/store flags, ORed in with type */
69 #define UPDATE 0x40 /* matches bit in opcode 31 instructions */
/* Barrier type field, ORed in with type */
/* The barrier subtype is held in bits 5-7 of the type word (mask 0xe0) */
#define BARRIER_MASK 0xe0
#define BARRIER_SYNC 0x00
#define BARRIER_ISYNC 0x20
#define BARRIER_EIEIO 0x40
#define BARRIER_LWSYNC 0x60
#define BARRIER_PTESYNC 0x80
/* Cacheop values, ORed in with type */
82 #define CACHEOP_MASK 0x700
/* VSX flag values */
/* Power-of-two bit flags, so more than one may be ORed together */
#define VSX_FPCONV 1 /* do floating point SP/DP conversion */
#define VSX_SPLAT 2 /* store loaded value into all elements */
#define VSX_LDLEFT 4 /* load VSX register from left */
#define VSX_CHECK_VEC 8 /* check MSR_VEC not MSR_VSX for reg >= 32 */
/* Size field in type word */
#define SIZE(n) ((n) << 12) /* encode access size n (bytes) in bits 12 and up */
#define GETSIZE(w) ((w) >> 12) /* recover the size stored by SIZE() */
#define GETTYPE(t) ((t) & INSTR_TYPE_MASK) /* base enum instruction_type value */
#define MKOP(t, f, s) ((t) | (f) | SIZE(s)) /* compose a type word: type | flags | size */
104 struct instruction_op {
108 /* For LOAD/STORE/LARX/STCX */
115 u8 element_size; /* for VSX/VMX loads/stores */
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
138 extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 * Emulate an instruction that can be executed just by updating
145 void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
 * Emulate instructions that cause a transfer of control,
 * arithmetic/logical instructions, loads and stores,
 * cache operations and barriers.
 * Returns 1 if the instruction was emulated successfully,
 * 0 if it could not be emulated, or -1 for an instruction that
 * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
156 extern int emulate_step(struct pt_regs *regs, unsigned int instr);
 * Emulate a load or store instruction by reading/writing the
 * memory of the current process. FP/VMX/VSX registers are assumed
 * to hold live values if the appropriate enable bit in regs->msr is
 * set; otherwise this will use the saved values in the thread struct
 * for user-mode accesses.
165 extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
/*
 * Fill the VSX register image *reg from the bytes at mem for the access
 * described by *op.  cross_endian indicates the memory image's byte
 * order differs from the CPU's - NOTE(review): element granularity
 * presumably follows op->element_size; confirm in the implementation.
 */
extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
			     const void *mem, bool cross_endian);
169 extern void emulate_vsx_store(struct instruction_op *op,
170 const union vsx_reg *reg, void *mem,
172 extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);