/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 */
7
8#ifndef __ASM_ARM_PROCESSOR_H
9#define __ASM_ARM_PROCESSOR_H
10
1da177e4
LT
11#ifdef __KERNEL__
12
864232fa 13#include <asm/hw_breakpoint.h>
1da177e4 14#include <asm/ptrace.h>
1da177e4 15#include <asm/types.h>
27a84793 16#include <asm/unified.h>
1da177e4 17
922a70d3 18#ifdef __KERNEL__
794baba6 19#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
922a70d3
DH
20 TASK_SIZE : TASK_SIZE_26)
21#define STACK_TOP_MAX TASK_SIZE
22#endif
23
/*
 * Per-thread debug state; only carries hardware-breakpoint slots when
 * CONFIG_HAVE_HW_BREAKPOINT is enabled, otherwise it is empty.
 */
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
#endif
};
30struct thread_struct {
31 /* fault info */
32 unsigned long address;
33 unsigned long trap_no;
34 unsigned long error_code;
35 /* debugging */
36 struct debug_info debug;
37};
38
/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed: report an empty region.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* An empty (zero-length) whitelist region at offset zero. */
	*offset = *size = 0;
}
#define INIT_THREAD  {	}

/*
 * Set up the user registers for a freshly exec'd task: zero the regs,
 * select 26/32-bit and ARM/Thumb CPSR state, and set pc/sp.  For ELF
 * FDPIC binaries r7-r9 (preloaded by the loader) are preserved across
 * the memset and r10 is pointed at the data segment.
 */
#define start_thread(regs,pc,sp)					\
({									\
	unsigned long r7, r8, r9;					\
									\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {			\
		r7 = regs->ARM_r7;					\
		r8 = regs->ARM_r8;					\
		r9 = regs->ARM_r9;					\
	}								\
	memset(regs->uregs, 0, sizeof(regs->uregs));			\
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&			\
	    current->personality & FDPIC_FUNCPTRS) {			\
		regs->ARM_r7 = r7;					\
		regs->ARM_r8 = r8;					\
		regs->ARM_r9 = r9;					\
		regs->ARM_r10 = current->mm->start_data;		\
	} else if (!IS_ENABLED(CONFIG_MMU))				\
		regs->ARM_r10 = current->mm->start_data;		\
	if (current->personality & ADDR_LIMIT_32BIT)			\
		regs->ARM_cpsr = USR_MODE;				\
	else								\
		regs->ARM_cpsr = USR26_MODE;				\
	if (elf_hwcap & HWCAP_THUMB && pc & 1)				\
		regs->ARM_cpsr |= PSR_T_BIT;				\
	regs->ARM_cpsr |= PSR_ENDSTATE;					\
	regs->ARM_pc = pc & ~1;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
})
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Return the kernel text address task @p is blocked in, for /proc. */
unsigned long get_wchan(struct task_struct *p);
/*
 * On ARMv6 (and with erratum 754327) cpu_relax() needs a memory
 * barrier plus a run of nops; elsewhere a compiler barrier suffices.
 */
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax()						\
	do {							\
		smp_mb();					\
		__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
	} while (0)
#else
#define cpu_relax()			barrier()
#endif
/* User-mode pt_regs live at the very top of the task's kernel stack. */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
/*
 * Emit the SMP instruction and record its address in .alt.smp.init so
 * it can be replaced with the UP variant when not running SMP; on UP
 * kernels just emit the UP instruction directly.
 */
#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)	up
#endif
1da177e4
LT
115/*
116 * Prefetching support - only ARMv5.
117 */
118#if __LINUX_ARM_ARCH__ >= 5
119
120#define ARCH_HAS_PREFETCH
02828845
NP
121static inline void prefetch(const void *ptr)
122{
123 __asm__ __volatile__(
16f719de 124 "pld\t%a0"
e744dff7 125 :: "p" (ptr));
02828845 126}
1da177e4 127
d8f57aa4 128#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
1da177e4 129#define ARCH_HAS_PREFETCHW
d8f57aa4
WD
130static inline void prefetchw(const void *ptr)
131{
132 __asm__ __volatile__(
133 ".arch_extension mp\n"
134 __ALT_SMP_ASM(
43947b88
SA
135 "pldw\t%a0",
136 "pld\t%a0"
d8f57aa4
WD
137 )
138 :: "p" (ptr));
139}
140#endif
1da177e4
LT
141#endif
142
7dbaa466
RH
143#define HAVE_ARCH_PICK_MMAP_LAYOUT
144
1da177e4
LT
145#endif
146
147#endif /* __ASM_ARM_PROCESSOR_H */