include/linux/randomize_kstack.h
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RANDOMIZE_KSTACK_H
#define _LINUX_RANDOMIZE_KSTACK_H

#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/percpu-defs.h>

DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
			 randomize_kstack_offset);
DECLARE_PER_CPU(u32, kstack_offset);

/*
 * Do not use this anywhere else in the kernel. This is used here because
 * it provides an arch-agnostic way to grow the stack with correct
 * alignment. Also, since this use is being explicitly masked to a max of
 * 10 bits, stack-clash style attacks are unlikely. For more details see
 * "VLAs" in Documentation/process/deprecated.rst
 *
 * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
 * only with Clang and not GCC). Initializing the unused area on each syscall
 * entry is expensive, and generating an implicit call to memset() may also be
 * problematic (such as in noinstr functions). Therefore, if the compiler
 * supports it (which it should if it initializes allocas), always use the
 * "uninitialized" variant of the builtin.
 */
#if __has_builtin(__builtin_alloca_uninitialized)
#define __kstack_alloca __builtin_alloca_uninitialized
#else
#define __kstack_alloca __builtin_alloca
#endif

/*
 * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
 * "VLA" from being unbounded (see above). 10 bits leaves enough room for
 * per-arch offset masks to reduce entropy (by removing higher bits, since
 * high entropy may overly constrain usable stack space), and for
 * compiler/arch-specific stack alignment to remove the lower bits.
 */
#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)
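
/*
 * Worked example (illustrative numbers, not mandated by this header):
 * the 0x3FF mask above bounds the alloca to at most 1023 bytes. If the
 * compiler/arch then rounds that allocation to, say, 16-byte stack
 * alignment, the low 4 bits are effectively discarded, leaving roughly
 * 6 bits (64 distinct offsets) of usable entropy per syscall.
 */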

/*
 * These macros must be used during syscall entry when interrupts and
 * preempt are disabled, and after user registers have been stored to
 * the stack.
 */
#define add_random_kstack_offset() do {					\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = raw_cpu_read(kstack_offset);		\
		u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset));	\
		/* Keep allocation even after "ptr" loses scope. */	\
		asm volatile("" :: "r"(ptr) : "memory");		\
	}								\
} while (0)
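
/*
 * Note: the empty asm above references "ptr" as an input operand and
 * clobbers memory, so the compiler cannot treat the alloca as dead and
 * must keep the stack adjustment for the remainder of the function.
 */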

#define choose_random_kstack_offset(rand) do {				\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = raw_cpu_read(kstack_offset);		\
		offset ^= (rand);					\
		raw_cpu_write(kstack_offset, offset);			\
	}								\
} while (0)
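
/*
 * Illustrative usage only: a sketch for a hypothetical architecture's
 * syscall path (the function names below are invented for this example,
 * not taken from any arch/ code). The offset consumed here by
 * add_random_kstack_offset() was chosen during an earlier syscall, and
 * choose_random_kstack_offset() feeds fresh entropy for a later one:
 *
 *	static void arch_do_syscall(struct pt_regs *regs, unsigned long nr)
 *	{
 *		add_random_kstack_offset();
 *		invoke_syscall(regs, nr);
 *		choose_random_kstack_offset(arch_cycle_counter());
 *	}
 */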
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#define add_random_kstack_offset() do { } while (0)
#define choose_random_kstack_offset(rand) do { } while (0)
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */

#endif