1 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry.
 *
 * this should only contain volatile regs
 * since we can keep non-volatile in the thread_struct
 * should set this up when only volatiles are saved
 *
 * Since this is going on the stack, *CARE MUST BE TAKEN* to insure
 * that the overall structure is a multiple of 16 bytes in length.
 *
 * Note that the offsets of the fields in this struct correspond with
 * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
 */
19 #ifndef _ASM_POWERPC_PTRACE_H
20 #define _ASM_POWERPC_PTRACE_H
22 #include <linux/err.h>
23 #include <uapi/asm/ptrace.h>
24 #include <asm/asm-const.h>
31 struct user_pt_regs user_regs;
33 unsigned long gpr[32];
36 unsigned long orig_gpr3;
58 #if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
63 unsigned long exit_result;
66 #ifdef CONFIG_PPC_KUAP
69 #ifdef CONFIG_PPC_PKEY
73 #ifdef CONFIG_PPC_PKEY
77 unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
80 #if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
81 struct { /* Must be a multiple of 16 bytes */
// Always displays as "REGS" in memory dumps
#ifdef CONFIG_CPU_BIG_ENDIAN
#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x52454753)
#else
/* Little-endian byte-swap of the same "REGS" ASCII value. */
#define STACK_FRAME_REGS_MARKER	ASM_CONST(0x53474552)
#endif
#ifdef __powerpc64__

/*
 * Size of redzone that userspace is allowed to use below the stack
 * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
 * the new ELFv2 little-endian ABI, so we allow the larger amount.
 *
 * For kernel code we allow a 288-byte redzone, in order to conserve
 * kernel stack space; gcc currently only uses 288 bytes, and will
 * hopefully allow explicit control of the redzone size in future.
 */
#define USER_REDZONE_SIZE	512
#define KERNEL_REDZONE_SIZE	288

#define STACK_FRAME_OVERHEAD	112	/* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE	2	/* Location of LR in stack frame */
#define STACK_USER_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_INT_FRAME_REGS	STACK_FRAME_OVERHEAD
#define STACK_INT_FRAME_MARKER	(STACK_FRAME_OVERHEAD - 16)
#define STACK_SWITCH_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_SWITCH_FRAME_REGS	STACK_FRAME_OVERHEAD

#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STACK_FRAME_MIN_SIZE	32
#else
#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
#endif

/* Size of dummy stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE	128
#define __SIGNAL_FRAMESIZE32	64

#else /* __powerpc64__ */

/* 32-bit ABIs have no red zone below the stack pointer. */
#define USER_REDZONE_SIZE	0
#define KERNEL_REDZONE_SIZE	0

#define STACK_FRAME_OVERHEAD	16	/* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE	1	/* Location of LR in stack frame */
#define STACK_USER_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_INT_FRAME_REGS	STACK_FRAME_OVERHEAD
#define STACK_INT_FRAME_MARKER	(STACK_FRAME_OVERHEAD - 8)
#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
#define STACK_SWITCH_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_SWITCH_FRAME_REGS	STACK_FRAME_OVERHEAD

/* Size of stack frame allocated when calling signal handler. */
#define __SIGNAL_FRAMESIZE	64

#endif /* __powerpc64__ */

#define STACK_INT_FRAME_SIZE	(KERNEL_REDZONE_SIZE + STACK_USER_INT_FRAME_SIZE)
#define STACK_INT_FRAME_MARKER_LONGS	(STACK_INT_FRAME_MARKER/sizeof(long))
161 #include <asm/paca.h>
/*
 * On SMP the profiled PC may live in another CPU's saved registers and is
 * resolved out-of-line; on UP it is simply the interrupted instruction
 * pointer.
 */
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif
/* System call entry/exit tracing hooks, called from the syscall path. */
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
/*
 * Invalidate the cached SRR/HSRR state so the exit path knows the return
 * registers must be reloaded before rfid/hrfid (Book3S-64 only; a no-op
 * elsewhere).
 */
static inline void set_return_regs_changed(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
	local_paca->hsrr_valid = 0;
	local_paca->srr_valid = 0;
#endif
}
180 static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
183 set_return_regs_changed();
186 static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
189 set_return_regs_changed();
192 static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
194 regs_set_return_ip(regs, regs->nip + offset);
197 static inline unsigned long instruction_pointer(struct pt_regs *regs)
/* Generic-ptrace setter for the return PC. */
static inline void instruction_pointer_set(struct pt_regs *regs,
		unsigned long val)
{
	regs_set_return_ip(regs, val);
}
208 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
213 static inline unsigned long frame_pointer(struct pt_regs *regs)
218 #define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
/*
 * Mark the current syscall as succeeding even if r3 looks like an error
 * value; TIF_NOERROR suppresses the CR0.SO error flagging on exit.
 * do/while(0) makes the macro safe as a single statement.
 */
#define force_successful_syscall_return()	\
	do { \
		set_thread_flag(TIF_NOERROR); \
	} while(0)
/* pt_regs of the current task: sits at the very top of its kernel stack. */
#define current_pt_regs() \
	((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
/*
 * The 4 low bits (0xf) are available as flags to overload the trap word,
 * because interrupt vectors have minimum alignment of 0x10. TRAP_FLAGS_MASK
 * must cover the bits used as flags, including bit 0 which is used as the
 * "norestart" bit.
 */
#ifdef __powerpc64__
#define TRAP_FLAGS_MASK		0x1
#else
/*
 * On 4xx we use bit 1 in the trap word to indicate whether the exception
 * is a critical exception (1 means it is).
 */
#define TRAP_FLAGS_MASK		0xf
#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
#endif /* __powerpc64__ */
#define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
248 static __always_inline void set_trap(struct pt_regs *regs, unsigned long val)
250 regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
253 static inline bool trap_is_scv(struct pt_regs *regs)
255 return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000);
258 static inline bool trap_is_unsupported_scv(struct pt_regs *regs)
260 return IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x7ff0;
263 static inline bool trap_is_syscall(struct pt_regs *regs)
265 return (trap_is_scv(regs) || TRAP(regs) == 0xc00);
268 static inline bool trap_norestart(struct pt_regs *regs)
270 return regs->trap & 0x1;
273 static __always_inline void set_trap_norestart(struct pt_regs *regs)
278 #define kernel_stack_pointer(regs) ((regs)->gpr[1])
279 static inline int is_syscall_success(struct pt_regs *regs)
281 if (trap_is_scv(regs))
282 return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
284 return !(regs->ccr & 0x10000000);
287 static inline long regs_return_value(struct pt_regs *regs)
289 if (trap_is_scv(regs))
292 if (is_syscall_success(regs))
295 return -regs->gpr[3];
298 static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
303 static inline bool cpu_has_msr_ri(void)
305 return !IS_ENABLED(CONFIG_BOOKE_OR_40x);
308 static inline bool regs_is_unrecoverable(struct pt_regs *regs)
310 return unlikely(cpu_has_msr_ri() && !(regs->msr & MSR_RI));
313 static inline void regs_set_recoverable(struct pt_regs *regs)
315 if (cpu_has_msr_ri())
316 regs_set_return_msr(regs, regs->msr | MSR_RI);
319 static inline void regs_set_unrecoverable(struct pt_regs *regs)
321 if (cpu_has_msr_ri())
322 regs_set_return_msr(regs, regs->msr & ~MSR_RI);
/* Hardware single-step and block-step support advertised to generic ptrace. */
#define arch_has_single_step() (1)
#define arch_has_block_step() (true)
#define ARCH_HAS_USER_SINGLE_STEP_REPORT
/*
 * kprobe-based event tracer support
 */

#include <linux/stddef.h>
#include <linux/thread_info.h>
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
/* dsisr is the last fetchable register in struct pt_regs. */
#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
340 * regs_get_register() - get register value from its offset
341 * @regs: pt_regs from which register value is gotten
342 * @offset: offset number of the register.
344 * regs_get_register returns the value of a register whose offset from @regs.
345 * The @offset is the offset of the register in struct pt_regs.
346 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
348 static inline unsigned long regs_get_register(struct pt_regs *regs,
351 if (unlikely(offset > MAX_REG_OFFSET))
353 return *(unsigned long *)((unsigned long)regs + offset);
357 * regs_within_kernel_stack() - check the address in the stack
358 * @regs: pt_regs which contains kernel stack pointer.
359 * @addr: address which is checked.
361 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
362 * If @addr is within the kernel stack, it returns true. If not, returns false.
365 static inline bool regs_within_kernel_stack(struct pt_regs *regs,
368 return ((addr & ~(THREAD_SIZE - 1)) ==
369 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
392 #endif /* __ASSEMBLY__ */
/* Extra PT_* ptrace offsets for the 32-bit compat register layout. */
#ifndef __powerpc64__
/* We need PT_SOFTE defined at all time to avoid #ifdefs */
#define PT_SOFTE PT_MQ
#else /* __powerpc64__ */
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
#define PT_VRSAVE_32 (PT_VR0 + 33*4)
#define PT_VSR0_32 300 /* each VSR reg occupies 4 slots in 32-bit */
#endif /* __powerpc64__ */
404 #endif /* _ASM_POWERPC_PTRACE_H */