Merge branch 'for-6.3/hid-bpf' into for-linus
[linux-block.git] / include / linux / thread_info.h
... / ...
CommitLineData
1/* SPDX-License-Identifier: GPL-2.0 */
2/* thread_info.h: common low-level thread information accessors
3 *
4 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
5 * - Incorporating suggestions made by Linus Torvalds
6 */
7
8#ifndef _LINUX_THREAD_INFO_H
9#define _LINUX_THREAD_INFO_H
10
11#include <linux/types.h>
12#include <linux/limits.h>
13#include <linux/bug.h>
14#include <linux/restart_block.h>
15#include <linux/errno.h>
16
17#ifdef CONFIG_THREAD_INFO_IN_TASK
18/*
19 * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
20 * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
21 * including <asm/current.h> can cause a circular dependency on some platforms.
22 */
23#include <asm/current.h>
24#define current_thread_info() ((struct thread_info *)current)
25#endif
26
27#include <linux/bitops.h>
28
29/*
30 * For per-arch arch_within_stack_frames() implementations, defined in
31 * asm/thread_info.h.
32 */
enum {
	BAD_STACK = -1,		/* object overlaps stack in an invalid way */
	NOT_STACK = 0,		/* object not on the stack; also the default
				 * verdict of the weak arch_within_stack_frames()
				 * stub below */
	GOOD_FRAME,		/* presumably: object fits in a valid frame --
				 * exact semantics defined by per-arch
				 * implementations, not visible here */
	GOOD_STACK,
};
39
#ifdef CONFIG_GENERIC_ENTRY
/*
 * Bit numbers within thread_info::syscall_work (see the
 * set/test/clear_syscall_work() helpers below).  Only used by the
 * generic syscall entry code; !CONFIG_GENERIC_ENTRY kernels use
 * per-arch TIF_* flags instead.
 */
enum syscall_work_bit {
	SYSCALL_WORK_BIT_SECCOMP,
	SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
	SYSCALL_WORK_BIT_SYSCALL_TRACE,
	SYSCALL_WORK_BIT_SYSCALL_EMU,
	SYSCALL_WORK_BIT_SYSCALL_AUDIT,
	SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
	SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
};

/* Mask forms of the bit numbers above, for testing several at once. */
#define SYSCALL_WORK_SECCOMP		BIT(SYSCALL_WORK_BIT_SECCOMP)
#define SYSCALL_WORK_SYSCALL_TRACEPOINT	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
#define SYSCALL_WORK_SYSCALL_TRACE	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
#define SYSCALL_WORK_SYSCALL_EMU	BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
#define SYSCALL_WORK_SYSCALL_AUDIT	BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
#define SYSCALL_WORK_SYSCALL_EXIT_TRAP	BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
#endif
59
60#include <asm/thread_info.h>
61
62#ifdef __KERNEL__
63
/*
 * Arch hook invoked after restart_block::fn has been set (see
 * set_restart_fn() below); no-op unless the architecture overrides it.
 */
#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif
67
/*
 * Arm @restart with handler @fn, then run the optional arch hook.
 * Always returns -ERESTART_RESTARTBLOCK, so callers can return the
 * result directly.  Note the ordering: @fn must be stored before
 * arch_set_restart_data() runs.
 */
static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);
	return -ERESTART_RESTARTBLOCK;
}
75
/* Stack alignment defaults to the stack size; arch may override. */
#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

/*
 * GFP flags for thread_info/stack allocations: memcg-accounted and
 * returned zeroed (__GFP_ZERO).
 */
#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)
81
82/*
83 * flag set/clear/test wrappers
84 * - pass TIF_xxxx constants to these functions
85 */
86
/* Atomically set TIF flag @flag in @ti->flags (set_bit is atomic). */
static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}
91
/* Atomically clear TIF flag @flag in @ti->flags. */
static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}
96
97static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
98 bool value)
99{
100 if (value)
101 set_ti_thread_flag(ti, flag);
102 else
103 clear_ti_thread_flag(ti, flag);
104}
105
106static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
107{
108 return test_and_set_bit(flag, (unsigned long *)&ti->flags);
109}
110
/* Atomically clear @flag in @ti->flags; return its previous value. */
static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}
115
/* Return non-zero if TIF flag @flag is set in @ti->flags. */
static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}
120
121/*
122 * This may be used in noinstr code, and needs to be __always_inline to prevent
123 * inadvertent instrumentation.
124 */
125static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
126{
127 return READ_ONCE(ti->flags);
128}
129
/*
 * Convenience forms of the ti-flag helpers above, operating on the
 * current task's thread_info (plus one taking an explicit task @t).
 */
#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)
#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())

#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))
147
#ifdef CONFIG_GENERIC_ENTRY
/*
 * Generic entry code keeps syscall work state in a dedicated
 * thread_info::syscall_work word, indexed by SYSCALL_WORK_BIT_*.
 * @fl is the suffix only, e.g. set_syscall_work(SECCOMP).
 */
#define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define clear_syscall_work(fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)

#define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define clear_task_syscall_work(t, fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

#else /* CONFIG_GENERIC_ENTRY */

/*
 * Legacy entry code: the same operations map onto per-arch TIF_* bits
 * in thread_info::flags instead of a separate syscall_work word.
 */
#define set_syscall_work(fl)						\
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
#define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
#define clear_syscall_work(fl) \
	clear_ti_thread_flag(current_thread_info(), TIF_##fl)

#define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define clear_task_syscall_work(t, fl) \
	clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */
179
/* Non-zero if the current task has TIF_NEED_RESCHED set. */
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
181
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
/*
 * Default when the arch cannot walk its stack frames: always return
 * 0 (NOT_STACK), i.e. no frame-level verdict on @obj/@len within
 * [@stack, @stackend).
 */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif
190
#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
					bool to_user);

/*
 * Hardened-usercopy validation of an @n-byte copy at @ptr
 * (@to_user gives the direction).  Compile-time-constant sizes are
 * skipped here -- presumably already covered by the static checks in
 * check_copy_size() below.  __always_inline so __builtin_constant_p
 * is evaluated in the caller's context.
 */
static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
/* Hardened usercopy disabled: validation is a no-op. */
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
206
/*
 * Deliberately undefined functions: referencing one from code the
 * compiler cannot eliminate produces a build error carrying the
 * message below (see check_copy_size()).
 */
extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

/* Out-of-line report for a copy overflow detected at runtime. */
void __copy_overflow(int size, unsigned long count);
213
/*
 * Report an overflowing copy: object of @size bytes, @count bytes
 * requested.  Compiles to nothing when CONFIG_BUG is disabled.
 */
static inline void copy_overflow(int size, unsigned long count)
{
	if (IS_ENABLED(CONFIG_BUG))
		__copy_overflow(size, count);
}
219
/*
 * Sanity-check a copy of @bytes bytes to/from @addr (@is_source says
 * whether @addr is the source).  Returns true if the copy may proceed.
 *
 * When the compiler knows the object size of @addr and it is smaller
 * than @bytes:
 *  - constant @bytes becomes a hard build error via __bad_copy_from()
 *    or __bad_copy_to();
 *  - non-constant @bytes is reported at runtime and false is returned.
 * Copies larger than INT_MAX are rejected with a one-shot warning.
 * __always_inline so the builtins evaluate at each call site.
 */
static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __builtin_object_size(addr, 0);	/* -1 when unknown */
	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	if (WARN_ON_ONCE(bytes > INT_MAX))
		return false;
	check_object_size(addr, bytes, is_source);
	return true;
}
238
/*
 * Arch hook -- from the name, presumably run when a task execs;
 * no-op unless the architecture provides its own.
 */
#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif
242
243#endif /* __KERNEL__ */
244
245#endif /* _LINUX_THREAD_INFO_H */