arm64: make is_ttbrX_addr() noinstr-safe

arch/arm64/include/asm/processor.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN    0
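
/*
 * Illustrative note (not part of this header): network drivers commonly
 * reserve NET_IP_ALIGN bytes of headroom when allocating receive
 * buffers, e.g.:
 *
 *        skb_reserve(skb, NET_IP_ALIGN);
 *
 * On arm64 that is a no-op, per the rationale above.
 */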

#define MTE_CTRL_GCR_USER_EXCL_SHIFT    0
#define MTE_CTRL_GCR_USER_EXCL_MASK     0xffff

#define MTE_CTRL_TCF_SYNC               (1UL << 16)
#define MTE_CTRL_TCF_ASYNC              (1UL << 17)
#define MTE_CTRL_TCF_ASYMM              (1UL << 18)
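
/*
 * Illustrative sketch (assuming a struct thread_struct *t with MTE
 * enabled): the user's GCR_EL1 exclude mask is recovered as
 *
 *        u16 excl = (t->mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
 *                   MTE_CTRL_GCR_USER_EXCL_MASK;
 *
 * while the TCF bits select synchronous, asynchronous or asymmetric
 * tag check fault reporting.
 */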

#ifndef __ASSEMBLY__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <vdso/processor.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/kasan.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/spectre.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64   (UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64            (UL(1) << vabits_actual)
#define TASK_SIZE_MAX           (UL(1) << VA_BITS)

#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
/*
 * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
 * by the compat vectors page.
 */
#define TASK_SIZE_32            UL(0x100000000)
#else
#define TASK_SIZE_32            (UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE               (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)       (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW      (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE               TASK_SIZE_64
#define DEFAULT_MAP_WINDOW      DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */
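
/*
 * Worked example (hypothetical configuration): with VA_BITS_MIN == 48,
 * DEFAULT_MAP_WINDOW_64 is 1UL << 48 (256 TiB), while TASK_SIZE_64
 * follows the runtime-detected vabits_actual, so a 52-bit-VA system can
 * expose up to 1UL << 52 (4 PiB) to tasks that ask for it.
 */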

#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX           TASK_SIZE_64
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 4))
#else
#define STACK_TOP_MAX           DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE    0xffff0000
#define STACK_TOP               (test_thread_flag(TIF_32BIT) ? \
                                AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP               STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#ifndef CONFIG_ARM64_FORCE_52BIT
#define arch_get_mmap_end(addr, len, flags) \
                (((addr) > DEFAULT_MAP_WINDOW) ? TASK_SIZE : DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) (((addr) > DEFAULT_MAP_WINDOW) ?        \
                                        (base) + TASK_SIZE - DEFAULT_MAP_WINDOW :\
                                        (base))
#endif /* CONFIG_ARM64_FORCE_52BIT */
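
/*
 * Illustrative userspace view: addresses above DEFAULT_MAP_WINDOW are
 * only handed out when a process passes a sufficiently high hint, e.g.
 *
 *        p = mmap((void *)(1UL << 50), len, PROT_READ | PROT_WRITE,
 *                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * A hint above DEFAULT_MAP_WINDOW makes arch_get_mmap_end() return the
 * full TASK_SIZE instead of the default window.
 */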

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT  (arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        /* Have we suspended stepping by a debugger? */
        int                     suspended_step;
        /* Allow breakpoints and watchpoints to be disabled for this thread. */
        int                     bps_disabled;
        int                     wps_disabled;
        /* Hardware breakpoints pinned to this task. */
        struct perf_event       *hbp_break[ARM_MAX_BRP];
        struct perf_event       *hbp_watch[ARM_MAX_WRP];
#endif
};

enum vec_type {
        ARM64_VEC_SVE = 0,
        ARM64_VEC_SME,
        ARM64_VEC_MAX,
};

struct cpu_context {
        unsigned long x19;
        unsigned long x20;
        unsigned long x21;
        unsigned long x22;
        unsigned long x23;
        unsigned long x24;
        unsigned long x25;
        unsigned long x26;
        unsigned long x27;
        unsigned long x28;
        unsigned long fp;
        unsigned long sp;
        unsigned long pc;
};

struct thread_struct {
        struct cpu_context      cpu_context;    /* cpu context */

        /*
         * Whitelisted fields for hardened usercopy:
         * Maintainers must ensure manually that this contains no
         * implicit padding.
         */
        struct {
                unsigned long   tp_value;       /* TLS register */
                unsigned long   tp2_value;
                struct user_fpsimd_state fpsimd_state;
        } uw;

        unsigned int            fpsimd_cpu;
        void                    *sve_state;     /* SVE registers, if any */
        void                    *za_state;      /* ZA register, if any */
        unsigned int            vl[ARM64_VEC_MAX];      /* vector length */
        unsigned int            vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
        unsigned long           fault_address;  /* fault info */
        unsigned long           fault_code;     /* ESR_EL1 value */
        struct debug_info       debug;          /* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
        struct ptrauth_keys_user        keys_user;
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
        struct ptrauth_keys_kernel      keys_kernel;
#endif
#endif
#ifdef CONFIG_ARM64_MTE
        u64                     mte_ctrl;
#endif
        u64                     sctlr_user;
        u64                     svcr;
        u64                     tpidr2_el0;
};

static inline unsigned int thread_get_vl(struct thread_struct *thread,
                                         enum vec_type type)
{
        return thread->vl[type];
}

static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
{
        return thread_get_vl(thread, ARM64_VEC_SVE);
}

static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
{
        return thread_get_vl(thread, ARM64_VEC_SME);
}

static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
{
        if (system_supports_sme() && (thread->svcr & SVCR_SM_MASK))
                return thread_get_sme_vl(thread);
        else
                return thread_get_sve_vl(thread);
}
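
/*
 * Example of the intended semantics (illustrative): for a task in
 * streaming mode (PSTATE.SM, mirrored in thread->svcr),
 *
 *        unsigned int vl = thread_get_cur_vl(&task->thread);
 *
 * yields the SME vector length; otherwise it yields the SVE one.
 */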

unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
void task_set_vl(struct task_struct *task, enum vec_type type,
                 unsigned long vl);
void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
                        unsigned long vl);
unsigned int task_get_vl_onexec(const struct task_struct *task,
                                enum vec_type type);

static inline unsigned int task_get_sve_vl(const struct task_struct *task)
{
        return task_get_vl(task, ARM64_VEC_SVE);
}

static inline unsigned int task_get_sme_vl(const struct task_struct *task)
{
        return task_get_vl(task, ARM64_VEC_SME);
}

static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
{
        task_set_vl(task, ARM64_VEC_SVE, vl);
}

static inline unsigned int task_get_sve_vl_onexec(const struct task_struct *task)
{
        return task_get_vl_onexec(task, ARM64_VEC_SVE);
}

static inline void task_set_sve_vl_onexec(struct task_struct *task,
                                          unsigned long vl)
{
        task_set_vl_onexec(task, ARM64_VEC_SVE, vl);
}

#define SCTLR_USER_MASK                                                        \
        (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB |   \
         SCTLR_EL1_TCF0_MASK)

static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        /* Verify that there is no padding among the whitelisted fields: */
        BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
                     sizeof_field(struct thread_struct, uw.tp_value) +
                     sizeof_field(struct thread_struct, uw.tp2_value) +
                     sizeof_field(struct thread_struct, uw.fpsimd_state));

        *offset = offsetof(struct thread_struct, uw);
        *size = sizeof_field(struct thread_struct, uw);
}
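
/*
 * Background (a sketch, not this file's API): the (offset, size) pair
 * reported above feeds the task_struct slab cache's usercopy whitelist,
 * much like creating a cache with an explicit whitelisted region via
 *
 *        kmem_cache_create_usercopy(name, size, align, flags,
 *                                   useroffset, usersize, ctor);
 *
 * so hardened usercopy only allows user accesses that fall inside
 * thread.uw.
 */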

#ifdef CONFIG_COMPAT
#define task_user_tls(t)                                                \
({                                                                      \
        unsigned long *__tls;                                           \
        if (is_compat_thread(task_thread_info(t)))                      \
                __tls = &(t)->thread.uw.tp2_value;                      \
        else                                                            \
                __tls = &(t)->thread.uw.tp_value;                       \
        __tls;                                                          \
})
#else
#define task_user_tls(t)        (&(t)->thread.uw.tp_value)
#endif
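
/*
 * Usage sketch (illustrative): context-switch and ptrace code accesses
 * the appropriate TLS slot through this helper, e.g.
 *
 *        *task_user_tls(task) = new_tls;
 *
 * writing tp2_value for compat threads and tp_value for native ones,
 * as selected by the macro above.
 */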

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);

#define INIT_THREAD {                           \
        .fpsimd_cpu = NR_CPUS,                  \
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
        s32 previous_syscall = regs->syscallno;
        memset(regs, 0, sizeof(*regs));
        regs->syscallno = previous_syscall;
        regs->pc = pc;

        if (system_uses_irq_prio_masking())
                regs->pmr_save = GIC_PRIO_IRQON;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = PSR_MODE_EL0t;
        spectre_v4_enable_task_mitigation(current);
        regs->sp = sp;
}

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
                                       unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = PSR_AA32_MODE_USR;
        if (pc & 1)
                regs->pstate |= PSR_AA32_T_BIT;

#ifdef __AARCH64EB__
        regs->pstate |= PSR_AA32_E_BIT;
#endif

        spectre_v4_enable_task_mitigation(current);
        regs->compat_sp = sp;
}
#endif
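
/*
 * For orientation (illustrative, simplified): the ELF loader enters a
 * freshly exec'd image roughly as
 *
 *        start_thread(current_pt_regs(), elf_entry, stack_top);
 *
 * which wipes the register state, selects EL0 (PSR_MODE_EL0t) and sets
 * pc/sp. The compat variant selects AArch32 user mode instead and
 * honours the Thumb bit of the entry point.
 */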

static __always_inline bool is_ttbr0_addr(unsigned long addr)
{
        /* entry assembly clears tags for TTBR0 addrs */
        return addr < TASK_SIZE;
}

static __always_inline bool is_ttbr1_addr(unsigned long addr)
{
        /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
        return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
}
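
/*
 * Example (illustrative): both helpers are __always_inline so that
 * noinstr code, e.g. early fault handling, can classify an address
 * without calling potentially instrumented functions:
 *
 *        if (is_ttbr0_addr(addr))
 *                handle_user_fault(addr);        // TTBR0: user mapping
 *        else if (is_ttbr1_addr(addr))
 *                handle_kernel_fault(addr);      // TTBR1: kernel mapping
 *
 * handle_user_fault()/handle_kernel_fault() are hypothetical stand-ins.
 */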

/* Forward declaration, a strange C thing */
struct task_struct;

unsigned long __get_wchan(struct task_struct *p);

void update_sctlr_el1(u64 sctlr);

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);

#define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)   ((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)   user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
        asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
        asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
        asm volatile(ARM64_LSE_ATOMIC_INSN(
                     "prfm pstl1strm, %a0",
                     "nop") : : "p" (ptr));
}
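
/*
 * Usage sketch (illustrative): these map directly onto PRFM hints, e.g.
 * a list walk might warm the next node with
 *
 *        prefetch(node->next);
 *
 * pldl1keep hints an upcoming load, pstl1keep an upcoming store;
 * spin_lock_prefetch() only emits its streaming-store hint when LSE
 * atomics are not in use (it is patched to a NOP otherwise).
 */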

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>.  Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h.  The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL()    sve_get_current_vl()
#define SME_SET_VL(arg) sme_set_current_vl(arg)
#define SME_GET_VL()    sme_get_current_vl()
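
/*
 * Userspace view (illustrative): these back the vector length prctls,
 * e.g. requesting a 256-bit SVE vector length:
 *
 *        prctl(PR_SVE_SET_VL, 32);
 *
 * where the argument is the vector length in bytes, optionally ORed
 * with flags such as PR_SVE_VL_INHERIT.
 */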

/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)        ptrauth_prctl_reset_keys(tsk, arg)

/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */
#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled)                                \
        ptrauth_set_enabled_keys(tsk, keys, enabled)
#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk)

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
long get_tagged_addr_ctrl(struct task_struct *task);
#define SET_TAGGED_ADDR_CTRL(arg)       set_tagged_addr_ctrl(current, arg)
#define GET_TAGGED_ADDR_CTRL()          get_tagged_addr_ctrl(current)
#endif
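
/*
 * Userspace view (illustrative): a thread opts in to the tagged address
 * ABI with
 *
 *        prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * after which the top byte of user pointers is ignored by most
 * syscalls.
 */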

/*
 * For CONFIG_GCC_PLUGIN_STACKLEAK
 *
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
 */

/*
 * The top of the current task's task stack
 */
#define current_top_of_stack()  ((unsigned long)current->stack + THREAD_SIZE)
#define on_thread_stack()       (on_task_stack(current, current_stack_pointer, 1))

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */