/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif
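
/*
 * Illustrative sketch (not part of this header): likely()/unlikely() only
 * pass a static branch-prediction hint to the compiler; the branch behaves
 * the same either way. A typical error-path pattern, assuming a hypothetical
 * do_something() that returns 0 or a negative errno:
 *
 *	int ret = do_something();
 *
 *	if (unlikely(ret))
 *		return ret;
 */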

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif
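
/*
 * Illustrative sketch (not part of this header): barrier() stops the
 * compiler from caching or reordering memory accesses across this point,
 * e.g. in a simplified busy-wait on a flag set from interrupt context
 * (the flag name is hypothetical):
 *
 *	while (!done_flag)
 *		barrier();	// force done_flag to be re-read each loop
 */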

#ifndef barrier_data
/*
 * This version is used, e.g., to prevent dead-store elimination on @ptr,
 * where gcc and llvm may behave differently than with a normal barrier():
 * while gcc gets along with a normal barrier(), llvm needs the variable
 * passed as an explicit input to consider it clobbered. The issue is as
 * follows: while the inline asm might access any memory it wants, the
 * compiler could have fit all of @ptr into registers instead, and since
 * @ptr never escaped from that, it proved that the inline asm wasn't
 * touching any of it. This version works well with both compilers, i.e.
 * we're telling the compiler that the inline asm absolutely may see the
 * contents of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
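
/*
 * Illustrative sketch (not part of this header), loosely modelled on how an
 * explicit-zeroing helper can use barrier_data() so the final memset() of a
 * stack buffer is not eliminated as a dead store (the helper name here is
 * hypothetical):
 *
 *	static void wipe_buffer(void *buf, size_t len)
 *	{
 *		memset(buf, 0, len);
 *		barrier_data(buf);	// asm may "see" buf, keep the memset
 *	}
 */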

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_OBJTOOL
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
#else /* !CONFIG_OBJTOOL */
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */

/*
 * Mark a position in code as unreachable.  This can be used to
 * suppress control flow warnings after asm blocks that transfer
 * control elsewhere.
 */
#define unreachable() do {		\
	barrier_before_unreachable();	\
	__builtin_unreachable();	\
} while (0)
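
/*
 * Illustrative sketch (not part of this header): unreachable() after an asm
 * statement that never returns, so the compiler does not warn about control
 * flow falling off the end (the trap instruction is a hypothetical,
 * arch-specific example):
 *
 *	static void __noreturn die_now(void)
 *	{
 *		asm volatile("ud2");
 *		unreachable();
 *	}
 */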

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))	\
	= (unsigned long)&sym;
#endif
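
/*
 * Illustrative sketch (not part of this header): keeping a handler alive
 * when it is only reached through a vector table built outside the
 * compiler's view (the handler name is hypothetical):
 *
 *	void early_trap_handler(void);
 *	KENTRY(early_trap_handler);
 */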

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)

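/*
 * Illustrative sketch (not part of this header): absolute_pointer() turns a
 * fixed numeric address into a pointer without the compiler "seeing" the
 * constant, avoiding bounds-style warnings about dereferencing a literal
 * address (the address, buffer and length below are made up):
 *
 *	memcpy(buf, absolute_pointer(0x000f0000), len);
 */
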
#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
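
/*
 * Illustrative sketch (not part of this header): hiding a value from the
 * optimizer so a computation is not constant-folded or hoisted away, e.g.
 * in timing-sensitive code (get_key() is a hypothetical helper):
 *
 *	u32 key = get_key();
 *	OPTIMIZER_HIDE_VAR(key);	// compiler now treats key as unknown
 */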

#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven.  One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 * For example, if accesses to a given variable are protected by a lock,
 * except for diagnostic code, then the accesses under the lock should
 * be plain C-language accesses and those in the diagnostic code should
 * use data_race().  This way, KCSAN will complain if buggy lockless
 * accesses to that variable are introduced, even if the buggy accesses
 * are protected by READ_ONCE() or WRITE_ONCE().
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.  If the access must
 * be atomic *and* KCSAN should ignore the access, use both data_race()
 * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
 */
#define data_race(expr)							\
({									\
	__kcsan_disable_current();					\
	__auto_type __v = (expr);					\
	__kcsan_enable_current();					\
	__v;								\
})
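
/*
 * Illustrative sketch (not part of this header): a diagnostic read of a
 * lock-protected counter, done intentionally without taking the lock
 * (the seq_file handle and stats structure are hypothetical):
 *
 *	seq_printf(m, "drops: %lu\n", data_race(stats->drops));
 */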

#ifdef __CHECKER__
#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) (0)
#else /* __CHECKER__ */
#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
#endif /* __CHECKER__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __is_array(a)		(!__same_type((a), &(a)[0]))
#define __must_be_array(a)	__BUILD_BUG_ON_ZERO_MSG(!__is_array(a),	\
							"must be array")

#define __is_byte_array(a)	(__is_array(a) && sizeof((a)[0]) == 1)
#define __must_be_byte_array(a)	__BUILD_BUG_ON_ZERO_MSG(!__is_byte_array(a), \
							"must be byte array")

/*
 * If the "nonstring" attribute isn't available, we have to return true
 * so the __must_*() checks pass when "nonstring" isn't supported.
 */
#if __has_attribute(__nonstring__) && defined(__annotated)
#define __is_cstr(a)		(!__annotated(a, nonstring))
#define __is_noncstr(a)		(__annotated(a, nonstring))
#else
#define __is_cstr(a)		(true)
#define __is_noncstr(a)		(true)
#endif

/* Require that C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
#define __must_be_cstr(p) \
	__BUILD_BUG_ON_ZERO_MSG(!__is_cstr(p), \
				"must be C-string (NUL-terminated)")
#define __must_be_noncstr(p) \
	__BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p), \
				"must be non-C-string (not NUL-terminated)")

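/*
 * Illustrative sketch (not part of this header): a string helper can add
 * __must_be_cstr() so that callers passing a buffer marked "nonstring" get
 * a build error instead of a silent missing-NUL bug (copy_name() and
 * do_copy_name() are hypothetical; the "+ 0" pattern mirrors how
 * __must_be_array() is typically folded in):
 *
 *	#define copy_name(dst, src)	\
 *		(do_copy_name(dst, src) + __must_be_cstr(src))
 */
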
/*
 * Use __typeof_unqual__() when available.
 *
 * XXX: Remove test for __CHECKER__ once
 * sparse learns about __typeof_unqual__().
 */
#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
# define USE_TYPEOF_UNQUAL	1
#endif

/*
 * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as the typeof
 * operator when available, to return the unqualified type of the
 * expression.
 */
#if defined(USE_TYPEOF_UNQUAL)
# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp)
#else
# define TYPEOF_UNQUAL(exp) __typeof__(exp)
#endif

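/*
 * Illustrative sketch (not part of this header): when the compiler supports
 * __typeof_unqual__(), TYPEOF_UNQUAL() drops qualifiers such as volatile or
 * const, so a local snapshot is an ordinary variable (with the __typeof__
 * fallback the qualifiers are kept; hw_count is a hypothetical variable):
 *
 *	volatile int hw_count;
 *	TYPEOF_UNQUAL(hw_count) snap = hw_count;	// snap is plain int
 */
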
#endif /* __KERNEL__ */

#if defined(CONFIG_CFI_CLANG) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
/*
 * Force a reference to the external symbol so the compiler generates
 * __kcfi_typeid.
 */
#define KCFI_REFERENCE(sym) __ADDRESSABLE(sym)
#else
#define KCFI_REFERENCE(sym)
#endif
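
/*
 * Illustrative sketch (not part of this header): if a function is only
 * called indirectly from assembly, KCFI_REFERENCE() makes the compiler emit
 * its KCFI type id so the asm side can reference it (the function name is
 * hypothetical):
 *
 *	void some_handler(void);
 *	KCFI_REFERENCE(some_handler);
 */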

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

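/*
 * Illustrative sketch (not part of this header): given a table of 32-bit
 * offsets that are relative to their own storage location (as used by some
 * relative symbol/jump tables), recover the absolute target address
 * (offset_table and i are hypothetical):
 *
 *	const int *entry = &offset_table[i];
 *	void *target = offset_to_ptr(entry);
 */
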
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_64BIT
#define ARCH_SEL(a,b) a
#else
#define ARCH_SEL(a,b) b
#endif

277
7290d580
AB
278/*
279 * Force the compiler to emit 'sym' as a symbol, so that we can reference
280 * it from inline assembler. Necessary in case 'sym' could be inlined
281 * otherwise, or eliminated entirely due to lack of references that are
282 * visible to the compiler.
283 */
0ef8047b
JG
284#define ___ADDRESSABLE(sym, __attrs) \
285 static void * __used __attrs \
ed2f752e 286 __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym;
0ef8047b 287
92efda8e
ST
288#define __ADDRESSABLE(sym) \
289 ___ADDRESSABLE(sym, __section(".discard.addressable"))
7290d580 290
0ef8047b
JG
291#define __ADDRESSABLE_ASM(sym) \
292 .pushsection .discard.addressable,"aw"; \
293 .align ARCH_SEL(8,4); \
294 ARCH_SEL(.quad, .long) __stringify(sym); \
295 .popsection;
7290d580 296
0ef8047b 297#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
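
/*
 * Illustrative sketch (not part of this header): keeping a function's
 * symbol around when the only reference to it comes from inline assembly
 * the compiler cannot see (the function name is hypothetical):
 *
 *	void slow_path(void);
 *	__ADDRESSABLE(slow_path);	// emits a .discard.addressable entry
 */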

/*
 * This returns a constant expression while determining if an argument is
 * a constant expression, most importantly without evaluating the argument.
 * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
 *
 * Details:
 * - sizeof() returns an integer constant expression, and does not evaluate
 *   the value of its operand; it only examines the type of its operand.
 * - The result of comparing two integer constant expressions is also
 *   an integer constant expression.
 * - The first literal "8" isn't important. It could be any literal value.
 * - The second literal "8" is to avoid warnings about unaligned pointers;
 *   this could otherwise just be "1".
 * - (long)(x) is used to avoid warnings about 64-bit types on 32-bit
 *   architectures.
 * - The C Standard defines "null pointer constant", "(void *)0", as
 *   distinct from other void pointers.
 * - If (x) is an integer constant expression, then the "* 0l" resolves
 *   it into an integer constant expression of value 0. Since it is cast to
 *   "void *", this makes the second operand a null pointer constant.
 * - If (x) is not an integer constant expression, then the second operand
 *   resolves to a void pointer (but not a null pointer constant: the value
 *   is not an integer constant 0).
 * - The conditional operator's third operand, "(int *)8", is an object
 *   pointer (to type "int").
 * - The behavior (including the return type) of the conditional operator
 *   ("operand1 ? operand2 : operand3") depends on the kind of expressions
 *   given for the second and third operands. This is the central mechanism
 *   of the macro:
 *   - When one operand is a null pointer constant (i.e. when x is an integer
 *     constant expression) and the other is an object pointer (i.e. our
 *     third operand), the conditional operator returns the type of the
 *     object pointer operand (i.e. "int *"). Here, within the sizeof(), we
 *     would then get:
 *       sizeof(*((int *)(...)) == sizeof(int) == 4
 *   - When one operand is a void pointer (i.e. when x is not an integer
 *     constant expression) and the other is an object pointer (i.e. our
 *     third operand), the conditional operator returns a "void *" type.
 *     Here, within the sizeof(), we would then get:
 *       sizeof(*((void *)(...)) == sizeof(void) == 1
 * - The equality comparison to "sizeof(int)" therefore depends on (x):
 *     sizeof(int) == sizeof(int)     (x) was a constant expression
 *     sizeof(int) != sizeof(void)    (x) was not a constant expression
 */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

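/*
 * Illustrative sketch (not part of this header): __is_constexpr() itself
 * folds to an integer constant expression, so its result can feed things
 * like BUILD_BUG_ON_ZERO() or array sizes:
 *
 *	int n = 4;
 *
 *	__is_constexpr(16)	// 1: integer constant expression
 *	__is_constexpr(n)	// 0: not a constant expression
 *	__is_constexpr(n + 0)	// 0: still involves a variable
 */
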
/*
 * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
 * bool and also pointer types.
 */
#define is_signed_type(type) (((type)(-1)) < (__force type)1)
#define is_unsigned_type(type) (!is_signed_type(type))

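/*
 * Illustrative sketch (not part of this header):
 *
 *	is_signed_type(int)		// true
 *	is_signed_type(unsigned long)	// false
 *	is_signed_type(bool)		// false: (bool)-1 is 1
 *	is_signed_type(char *)		// false: (char *)-1 compares high
 */
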
/*
 * Useful shorthand for "is this condition known at compile-time?"
 *
 * Note that the condition may involve non-constant values,
 * but the compiler may know enough about the details of the
 * values to determine that the condition is statically true.
 */
#define statically_true(x) (__builtin_constant_p(x) && (x))

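/*
 * Illustrative sketch (not part of this header): in an always-inline
 * helper, statically_true() lets the compiler pick a cheaper path when the
 * caller passes a compile-time constant (hash_small() and hash_generic()
 * are hypothetical):
 *
 *	static __always_inline u32 hash_len(const void *p, u32 len)
 *	{
 *		if (statically_true(len <= 8))
 *			return hash_small(p, len);
 *		return hash_generic(p, len);
 *	}
 */
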
/*
 * Similar to statically_true() but produces a constant expression
 *
 * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
 * which require their input to be a constant expression and for which
 * statically_true() would otherwise fail.
 *
 * This is a trade-off: const_true() requires all its operands to be
 * compile time constants. Otherwise it always returns false, even in
 * the most trivial cases like:
 *
 *	true || non_const_var
 *
 * By contrast, statically_true() is able to fold more complex
 * tautologies and will return true on expressions such as:
 *
 *	!(non_const_var * 8 % 4)
 *
 * For the general case, statically_true() is better.
 */
#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)

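/*
 * Illustrative sketch (not part of this header): const_true() stays a
 * constant expression, so it can sit inside BUILD_BUG_ON_ZERO()-style
 * checks where statically_true() would not be accepted (CHECK_SMALL is a
 * hypothetical macro; the build fails only when x is a constant known to
 * exceed 255):
 *
 *	#define CHECK_SMALL(x)	\
 *		BUILD_BUG_ON_ZERO(const_true((x) > 255))
 */
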
/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */