/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: low-level thread information
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/types.h>

/*
 * TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
 * reserve at the top of the kernel stack.  We do it because of a nasty
 * 32-bit corner case.  On x86_32, the hardware stack frame is
 * variable-length.  Except for vm86 mode, struct pt_regs assumes a
 * maximum-length frame.  If we enter from CPL 0, the top 8 bytes of
 * pt_regs don't actually exist.  Ordinarily this doesn't matter, but it
 * does in at least one case:
 *
 * If we take an NMI early enough in SYSENTER, then we can end up with
 * pt_regs that extends above sp0.  On the way out, in the espfix code,
 * we can read the saved SS value, but that value will be above sp0.
 * Without this offset, that can result in a page fault.  (We are
 * careful that, in this case, the value we read doesn't matter.)
 *
 * In vm86 mode, the hardware frame is much longer still, so add 16
 * bytes to make room for the real-mode segments.
 *
 * x86-64 has a fixed-length stack frame, but its length depends on
 * whether FRED is enabled.  Future versions of FRED might make this
 * dynamic, but for now the FRED frame is always 2 words longer.
 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_VM86
#  define TOP_OF_KERNEL_STACK_PADDING 16
# else
#  define TOP_OF_KERNEL_STACK_PADDING 8
# endif
#else /* x86-64 */
# ifdef CONFIG_X86_FRED
#  define TOP_OF_KERNEL_STACK_PADDING (2 * 8)
# else
#  define TOP_OF_KERNEL_STACK_PADDING 0
# endif
#endif
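/*
 * For illustration only, a rough sketch of how this padding is consumed
 * (the real helper lives in <asm/processor.h>, not here): task_pt_regs()
 * places pt_regs just below the padded top of the kernel stack, roughly:
 *
 *	unsigned long top = (unsigned long)task_stack_page(task) +
 *			    THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
 *	struct pt_regs *regs = ((struct pt_regs *)top) - 1;
 */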

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 */
#ifndef __ASSEMBLER__
struct task_struct;
#include <asm/cpufeature.h>
#include <linux/atomic.h>

struct thread_info {
	unsigned long	flags;		/* low level flags */
	unsigned long	syscall_work;	/* SYSCALL_WORK_ flags */
	u32		status;		/* thread synchronous flags */
#ifdef CONFIG_SMP
	u32		cpu;		/* current CPU */
#endif
};

#define INIT_THREAD_INFO(tsk)		\
{					\
	.flags		= 0,		\
}
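/*
 * Usage sketch (the accessors come from the generic <linux/thread_info.h>
 * and <linux/sched.h>, not from this file): the flags word is only ever
 * manipulated through atomic bit helpers, roughly:
 *
 *	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
 *	if (test_thread_flag(TIF_SIGPENDING))
 *		...	(deliver the pending signal)
 */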

#else /* !__ASSEMBLER__ */

#include <asm/asm-offsets.h>

#endif

/*
 * thread information flags
 * - these are process state flags that various assembly files
 *   may need to access
 */
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_NEED_RESCHED_LAZY	4	/* Lazy rescheduling needed */
#define TIF_SINGLESTEP		5	/* reenable singlestep on user return */
#define TIF_SSBD		6	/* Speculative store bypass disable */
#define TIF_SPEC_IB		9	/* Indirect branch speculation mitigation */
#define TIF_SPEC_L1D_FLUSH	10	/* Flush L1D on mm switches (processes) */
#define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
#define TIF_UPROBE		12	/* breakpointed or singlestepping */
#define TIF_PATCH_PENDING	13	/* pending live patching update */
#define TIF_NEED_FPU_LOAD	14	/* load FPU on return to userspace */
#define TIF_NOCPUID		15	/* CPUID is not accessible in userland */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_NOTIFY_SIGNAL	17	/* signal notifications exist */
#define TIF_MEMDIE		20	/* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
#define TIF_SPEC_FORCE_UPDATE	23	/* Force speculation MSR update in context switch */
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
#define TIF_ADDR32		29	/* 32-bit address space on 64 bits */

#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SSBD		(1 << TIF_SSBD)
#define _TIF_SPEC_IB		(1 << TIF_SPEC_IB)
#define _TIF_SPEC_L1D_FLUSH	(1 << TIF_SPEC_L1D_FLUSH)
#define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
#define _TIF_NEED_FPU_LOAD	(1 << TIF_NEED_FPU_LOAD)
#define _TIF_NOCPUID		(1 << TIF_NOCPUID)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_SPEC_FORCE_UPDATE	(1 << TIF_SPEC_FORCE_UPDATE)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_ADDR32		(1 << TIF_ADDR32)

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE					\
	(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP |		\
	 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)

/*
 * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
 */
#ifdef CONFIG_SMP
# define _TIF_WORK_CTXSW	(_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
#else
# define _TIF_WORK_CTXSW	(_TIF_WORK_CTXSW_BASE)
#endif

#ifdef CONFIG_X86_IOPL_IOPERM
# define _TIF_WORK_CTXSW_PREV	(_TIF_WORK_CTXSW | _TIF_USER_RETURN_NOTIFY | \
				 _TIF_IO_BITMAP)
#else
# define _TIF_WORK_CTXSW_PREV	(_TIF_WORK_CTXSW | _TIF_USER_RETURN_NOTIFY)
#endif

#define _TIF_WORK_CTXSW_NEXT	(_TIF_WORK_CTXSW)
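/*
 * Usage sketch (the real logic lives in arch/x86/kernel/process.c; this is
 * only an approximation): the context-switch path checks the outgoing and
 * incoming tasks against these masks and takes the slow __switch_to_xtra()
 * path only when some bit is set, roughly:
 *
 *	if (unlikely(task_thread_info(prev)->flags & _TIF_WORK_CTXSW_PREV ||
 *		     task_thread_info(next)->flags & _TIF_WORK_CTXSW_NEXT))
 *		__switch_to_xtra(prev, next);
 */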

#define STACK_WARN		(THREAD_SIZE/8)

/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLER__

/*
 * Walks up the stack frames to make sure that the specified object is
 * entirely contained by a single stack frame.
 *
 * Returns:
 *	GOOD_FRAME	if within a frame
 *	BAD_STACK	if placed across a frame boundary (or outside stack)
 *	NOT_STACK	unable to determine (no frame pointers, etc)
 *
 * This function reads pointers from the stack and dereferences them. The
 * pointers may not have their KMSAN shadow set up properly, which may result
 * in false positive reports. Disable instrumentation to avoid those.
 */
__no_kmsan_checks
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
#if defined(CONFIG_FRAME_POINTER)
	const void *frame = NULL;
	const void *oldframe;

	oldframe = __builtin_frame_address(1);
	if (oldframe)
		frame = __builtin_frame_address(2);
	/*
	 * low ----------------------------------------------> high
	 * [saved bp][saved ip][args][local vars][saved bp][saved ip]
	 *                     ^----------------^
	 *               allow copies only within here
	 */
	while (stack <= frame && frame < stackend) {
		/*
		 * If obj + len extends past the last frame, this
		 * check won't pass and the next frame will be 0,
		 * causing us to bail out and correctly report
		 * the copy as invalid.
		 */
		if (obj + len <= frame)
			return obj >= oldframe + 2 * sizeof(void *) ?
				GOOD_FRAME : BAD_STACK;
		oldframe = frame;
		frame = *(const void * const *)frame;
	}
	return BAD_STACK;
#else
	return NOT_STACK;
#endif
}
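/*
 * Usage sketch (an approximation of the hardened-usercopy caller in
 * mm/usercopy.c, not a definition here): the stack-bounds check ends up
 * invoking this helper roughly as:
 *
 *	ret = arch_within_stack_frames(task_stack_page(current),
 *				       task_stack_page(current) + THREAD_SIZE,
 *				       ptr, n);
 */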

#endif /* !__ASSEMBLER__ */

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT) */

#ifndef __ASSEMBLER__
#ifdef CONFIG_COMPAT
#define TS_I386_REGS_POKED	0x0004	/* regs poked by 32-bit ptracer */

#define arch_set_restart_data(restart)	\
	do { restart->arch_data = current_thread_info()->status; } while (0)

#endif

#ifdef CONFIG_X86_32
#define in_ia32_syscall() true
#else
#define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \
			   current_thread_info()->status & TS_COMPAT)
#endif
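/*
 * Usage sketch (illustrative only; struct compat_foo and struct foo are
 * hypothetical): callers typically use in_ia32_syscall() to select compat
 * behaviour on a 64-bit kernel, e.g.
 *
 *	if (in_ia32_syscall())
 *		size = sizeof(struct compat_foo);
 *	else
 *		size = sizeof(struct foo);
 */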

extern void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
#endif /* !__ASSEMBLER__ */

#endif /* _ASM_X86_THREAD_INFO_H */