arch/powerpc/include/asm/sstep.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 */
#include <asm/inst.h>

struct pt_regs;

/*
 * We don't allow single-stepping an mtmsrd that would clear
 * MSR_RI, since that would make the exception unrecoverable.
 * Since we need to single-step to proceed from a breakpoint,
 * we don't allow putting a breakpoint on an mtmsrd instruction.
 * Similarly we don't allow breakpoints on rfid instructions.
 * These macros tell us if an instruction is an mtmsrd or rfid.
 * Note that these return true for both mtmsr/rfi (32-bit)
 * and mtmsrd/rfid (64-bit).
 */
#define IS_MTMSRD(instr)	((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
#define IS_RFID(instr)		((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
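
/*
 * Worked example (illustrative note, not from the upstream header): the
 * mask 0xfc0007be checks the primary opcode and most of the extended
 * opcode, but clears the one extended-opcode bit (0x40) that differs
 * between the 32-bit and 64-bit forms, so a single comparison covers both:
 *
 *	mtmsr  r0 = 0x7c000124, mtmsrd r0 = 0x7c000164  -> both match IS_MTMSRD
 *	rfi       = 0x4c000064, rfid      = 0x4c000024  -> both match IS_RFID
 */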

enum instruction_type {
	COMPUTE,		/* arith/logical/CR op, etc. */
	LOAD,			/* load and store types need to be contiguous */
	LOAD_MULTI,
	LOAD_FP,
	LOAD_VMX,
	LOAD_VSX,
	STORE,
	STORE_MULTI,
	STORE_FP,
	STORE_VMX,
	STORE_VSX,
	LARX,
	STCX,
	BRANCH,
	MFSPR,
	MTSPR,
	CACHEOP,
	BARRIER,
	SYSCALL,
	SYSCALL_VECTORED_0,
	MFMSR,
	MTMSR,
	RFI,
	INTERRUPT,
	UNKNOWN
};

#define INSTR_TYPE_MASK	0x1f

#define OP_IS_LOAD(type)	((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
#define OP_IS_STORE(type)	((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
#define OP_IS_LOAD_STORE(type)	(LOAD <= (type) && (type) <= STCX)
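
/*
 * Example (illustrative): these range checks are the reason the
 * LOAD ... STORE_VSX enumerators above must stay contiguous.  A caller
 * normally tests the masked type, e.g.:
 *
 *	if (OP_IS_LOAD_STORE(GETTYPE(op->type)))
 *		handle_load_store(op);
 *
 * where handle_load_store() is a hypothetical helper, not part of this
 * header.
 */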

/* Compute flags, ORed in with type */
#define SETREG		0x20
#define SETCC		0x40
#define SETXER		0x80

/* Branch flags, ORed in with type */
#define SETLK		0x20
#define BRTAKEN		0x40
#define DECCTR		0x80

/* Load/store flags, ORed in with type */
#define SIGNEXT		0x20
#define UPDATE		0x40	/* matches bit in opcode 31 instructions */
#define BYTEREV		0x80
#define FPCONV		0x100

/* Barrier type field, ORed in with type */
#define BARRIER_MASK	0xe0
#define BARRIER_SYNC	0x00
#define BARRIER_ISYNC	0x20
#define BARRIER_EIEIO	0x40
#define BARRIER_LWSYNC	0x60
#define BARRIER_PTESYNC	0x80

/* Cacheop values, ORed in with type */
#define CACHEOP_MASK	0x700
#define DCBST		0
#define DCBF		0x100
#define DCBTST		0x200
#define DCBT		0x300
#define ICBI		0x400
#define DCBZ		0x500

/* VSX flags values */
#define VSX_FPCONV	1	/* do floating point SP/DP conversion */
#define VSX_SPLAT	2	/* store loaded value into all elements */
#define VSX_LDLEFT	4	/* load VSX register from left */
#define VSX_CHECK_VEC	8	/* check MSR_VEC not MSR_VSX for reg >= 32 */

/* Prefixed flag, ORed in with type */
#define PREFIXED	0x800

/* Size field in type word */
#define SIZE(n)		((n) << 12)
#define GETSIZE(w)	((w) >> 12)

#define GETTYPE(t)	((t) & INSTR_TYPE_MASK)
#define GETLENGTH(t)	(((t) & PREFIXED) ? 8 : 4)

#define MKOP(t, f, s)	((t) | (f) | SIZE(s))
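
/*
 * Worked example (illustrative): the type word packs the instruction
 * type into bits 0-4, the per-class flags above that, PREFIXED in
 * bit 11 and the access size from bit 12 upwards, so for a
 * sign-extending 4-byte load with update:
 *
 *	unsigned int w = MKOP(LOAD, SIGNEXT | UPDATE, 4);
 *
 *	GETTYPE(w)   == LOAD
 *	GETSIZE(w)   == 4
 *	(w & UPDATE) != 0
 *	GETLENGTH(w) == 4	(8 only when PREFIXED is set)
 */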

/* Prefix instruction operands */
#define GET_PREFIX_RA(i)	(((i) >> 16) & 0x1f)
#define GET_PREFIX_R(i)		((i) & (1ul << 20))
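
/*
 * Example (illustrative; based on how the decoder in sstep.c uses these):
 * for a prefixed instruction the prefix and suffix words are handled
 * separately: GET_PREFIX_RA() extracts the RA field from the suffix
 * word, while GET_PREFIX_R() tests the R (PC-relative addressing) bit
 * of the prefix word.
 */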

extern s32 patch__exec_instr;

struct instruction_op {
	int type;
	int reg;
	unsigned long val;
	/* For LOAD/STORE/LARX/STCX */
	unsigned long ea;
	int update_reg;
	/* For MFSPR */
	int spr;
	u32 ccval;
	u32 xerval;
	u8 element_size;	/* for VSX/VMX loads/stores */
	u8 vsx_flags;
};

union vsx_reg {
	u8	b[16];
	u16	h[8];
	u32	w[4];
	unsigned long d[2];
	float	fp[4];
	double	dp[2];
	__vector128 v;
};

/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
			 ppc_inst_t instr);
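
/*
 * Usage sketch (illustrative; try_emulate() is a hypothetical helper,
 * not part of this interface): a caller decodes first and only touches
 * *regs when analyse_instr() reports that a plain register update is
 * enough.
 *
 *	static int try_emulate(struct pt_regs *regs, ppc_inst_t instr)
 *	{
 *		struct instruction_op op;
 *
 *		if (analyse_instr(&op, regs, instr) != 1)
 *			return -1;	// needs more than a *regs update
 *		emulate_update_regs(regs, &op);
 *		return 0;
 *	}
 */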

/*
 * Emulate an instruction that can be executed just by updating
 * fields in *regs.
 */
void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);

/*
 * Emulate instructions that cause a transfer of control,
 * arithmetic/logical instructions, loads and stores,
 * cache operations and barriers.
 *
 * Returns 1 if the instruction was emulated successfully,
 * 0 if it could not be emulated, or -1 for an instruction that
 * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
 */
int emulate_step(struct pt_regs *regs, ppc_inst_t instr);
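
/*
 * Usage sketch (illustrative): single-step emulation as used by
 * probe-style code; when emulate_step() returns 1, *regs (including
 * regs->nip) has already been advanced past the emulated instruction.
 *
 *	if (emulate_step(regs, instr) == 1)
 *		return;		// fully emulated, regs already updated
 *	// otherwise fall back to executing the instruction out of line
 */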

/*
 * Emulate a load or store instruction by reading/writing the
 * memory of the current process.  FP/VMX/VSX registers are assumed
 * to hold live values if the appropriate enable bit in regs->msr is
 * set; otherwise this will use the saved values in the thread struct
 * for user-mode accesses.
 */
extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
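
/*
 * Usage sketch (illustrative, in the style of an alignment-fault
 * handler): decode with analyse_instr(), then let emulate_loadstore()
 * perform the access for the faulting instruction.
 *
 *	struct instruction_op op;
 *	int err = -1;
 *
 *	if (analyse_instr(&op, regs, instr) == 0 &&
 *	    OP_IS_LOAD_STORE(GETTYPE(op.type)))
 *		err = emulate_loadstore(regs, &op);
 */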

extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
			     const void *mem, bool cross_endian);
extern void emulate_vsx_store(struct instruction_op *op,
			      const union vsx_reg *reg, void *mem,
			      bool cross_endian);
extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);