=============
This parameter can be used to control kernel stack erasing at the end
-of syscalls for kernels built with ``CONFIG_GCC_PLUGIN_STACKLEAK``.
+of syscalls for kernels built with ``CONFIG_KSTACK_ERASE``.
That erasing reduces the information which kernel stack leak bugs
can reveal and blocks some uninitialized stack variable attacks.
The tradeoff is the performance impact: on a single CPU system, kernel
compilation sees a 1% slowdown; other systems and workloads may vary.
= ====================================================================
-0 Kernel stack erasing is disabled, STACKLEAK_METRICS are not updated.
+0 Kernel stack erasing is disabled, KSTACK_ERASE_METRICS are not updated.
1 Kernel stack erasing is enabled (default), it is performed before
returning to the userspace at the end of syscalls.
= ====================================================================
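
For reference, a minimal userspace sketch of the runtime control described above; it
assumes a kernel built with ``CONFIG_KSTACK_ERASE_RUNTIME_DISABLE=y``, which is what
exposes ``/proc/sys/kernel/stack_erasing`` (see the sysctl handler later in this series)::

    /*
     * Sketch only: query the current state of kernel stack erasing, then
     * disable it.  Writing requires root, since the sysctl is mode 0600.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/stack_erasing", "r+");
            int state;

            if (!f) {
                    perror("kernel.stack_erasing sysctl not available");
                    return 1;
            }
            if (fscanf(f, "%d", &state) == 1)
                    printf("kernel stack erasing is %s\n",
                           state ? "enabled" : "disabled");
            rewind(f);
            fprintf(f, "0\n");      /* 0 disables erasing, 1 re-enables it */
            fclose(f);
            return 0;
    }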
range must not overlap with anything except the KASAN shadow area, which is
correct as KASAN disables KASLR.
-For both 4- and 5-level layouts, the STACKLEAK_POISON value in the last 2MB
+For both 4- and 5-level layouts, the KSTACK_ERASE_POISON value in the last 2MB
hole: ffffffffffff4111
When releasing memory, it is best to poison the contents, to avoid reuse
attacks that rely on the old contents of memory. E.g., clear stack on a
-syscall return (``CONFIG_GCC_PLUGIN_STACKLEAK``), wipe heap memory on a
+syscall return (``CONFIG_KSTACK_ERASE``), wipe heap memory on a
free. This frustrates many uninitialized variable attacks, stack content
exposures, heap content exposures, and use-after-free attacks.
--------
When releasing memory, it is best to clear its contents to keep attackers from
reusing what was previously stored there. For example, clear the stack on
-syscall return (CONFIG_GCC_PLUGIN_STACKLEAK), and wipe heap memory when it is
+syscall return (CONFIG_KSTACK_ERASE), and wipe heap memory when it is
freed. This helps prevent many uninitialized variable attacks, stack content
exposures, heap content exposures, and use-after-free attacks.
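
As a minimal illustration of the poison-on-release idea above (a userspace sketch with
an arbitrary poison byte, not the kernel's own mechanism)::

    #include <stdlib.h>
    #include <string.h>

    /* Overwrite a buffer with a poison pattern before freeing it, so that a
     * later allocation cannot recover its old contents.  0xAA is arbitrary. */
    static void poison_and_free(void *buf, size_t len)
    {
            memset(buf, 0xAA, len);
            free(buf);
    }

    int main(void)
    {
            char *secret = malloc(64);

            if (!secret)
                    return 1;
            strcpy(secret, "sensitive data");
            poison_and_free(secret, 64);
            return 0;
    }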
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/kbuild/gcc-plugins.rst
-F: include/linux/stackleak.h
-F: kernel/stackleak.c
F: scripts/Makefile.gcc-plugins
F: scripts/gcc-plugins/
F: Documentation/ABI/testing/sysfs-kernel-oops_count
F: Documentation/ABI/testing/sysfs-kernel-warn_count
F: arch/*/configs/hardening.config
+F: include/linux/kstack_erase.h
F: include/linux/overflow.h
F: include/linux/randomize_kstack.h
F: include/linux/ucopysize.h
F: kernel/configs/hardening.config
+F: kernel/kstack_erase.c
F: lib/tests/randstruct_kunit.c
F: lib/tests/usercopy_kunit.c
F: mm/usercopy.c
If unsure, say N.
-config HAVE_ARCH_STACKLEAK
+config HAVE_ARCH_KSTACK_ERASE
bool
help
An architecture should select this if it has the code which
- fills the used part of the kernel stack with the STACKLEAK_POISON
+ fills the used part of the kernel stack with the KSTACK_ERASE_POISON
value before returning from system calls.
config HAVE_STACKPROTECTOR
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
- select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE
HEAD = head.o
OBJS += misc.o decompress.o
-CFLAGS_decompress.o += $(DISABLE_STACKLEAK_PLUGIN)
+CFLAGS_decompress.o += $(DISABLE_KSTACK_ERASE)
ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
OBJS += debug.o
AFLAGS_head.o += -DDEBUG
ct_user_enter save = 0
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
bl stackleak_erase_on_task_stack
#endif
restore_user_regs fast = 0, offset = 0
select HAVE_ARCH_KCSAN if EXPERT
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
SYM_CODE_START_LOCAL(ret_to_user)
ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
enable_step_tsk x19, x2
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
bl stackleak_erase_on_task_stack
#endif
kernel_exit 0
# Copyright 2022 Google LLC
KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
- -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
+ -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_KSTACK_ERASE) \
$(DISABLE_LATENT_ENTROPY_PLUGIN) \
$(call cc-option,-mbranch-protection=none) \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
ccflags-y += -fno-stack-protector \
-DDISABLE_BRANCH_PROFILING \
- $(DISABLE_STACKLEAK_PLUGIN)
+ $(DISABLE_KSTACK_ERASE)
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include
select HAVE_ARCH_KASAN
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
select HAVE_ARCH_KASAN if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
select HAVE_ARCH_KFENCE if MMU && 64BIT
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
#endif
bnez s0, 1f
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
call stackleak_erase_on_task_stack
#endif
# This file was copied from arm64/kernel/pi/Makefile.
KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
- -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
+ -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_KSTACK_ERASE) \
$(call cc-option,-mbranch-protection=none) \
-I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
-include $(srctree)/include/linux/hidden.h \
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=medany -ffreestanding -fno-zero-initialized-in-bss
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+PURGATORY_CFLAGS += $(DISABLE_KSTACK_ERASE) -DDISABLE_BRANCH_PROFILING
PURGATORY_CFLAGS += -fno-stack-protector -g0
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
select HAVE_ARCH_KCSAN
select HAVE_ARCH_KMSAN
select HAVE_ARCH_KFENCE
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
- select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_VMAP_STACK
#endif
.macro STACKLEAK_ERASE
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
brasl %r14,stackleak_erase_on_task_stack
#endif
.endm
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KMSAN if X86_64
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KSTACK_ERASE
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
- select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
.endm
.macro STACKLEAK_ERASE_NOCLOBBER
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
PUSH_AND_CLEAR_REGS
call stackleak_erase
POP_REGS
#endif /* !CONFIG_X86_64 */
.macro STACKLEAK_ERASE
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
call stackleak_erase
#endif
.endm
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=small -ffreestanding -fno-zero-initialized-in-bss -g0
PURGATORY_CFLAGS += -fpic -fvisibility=hidden
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+PURGATORY_CFLAGS += $(DISABLE_KSTACK_ERASE) -DDISABLE_BRANCH_PROFILING
PURGATORY_CFLAGS += -fno-stack-protector
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
-cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
+cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_KSTACK_ERASE) \
-fno-unwind-tables -fno-asynchronous-unwind-tables
cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base) \
- $(DISABLE_STACKLEAK_PLUGIN)
+ $(DISABLE_KSTACK_ERASE)
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \
- $(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_STACKLEAK_PLUGIN)
+ $(DISABLE_KSTACK_ERASE)
+cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_KSTACK_ERASE)
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
lkdtm-$(CONFIG_LKDTM) += refcount.o
lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += usercopy.o
-lkdtm-$(CONFIG_LKDTM) += stackleak.o
+lkdtm-$(CONFIG_LKDTM) += kstack_erase.o
lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o
lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code tests that the current task stack is properly erased (filled
+ * with KSTACK_ERASE_POISON).
+ *
+ * Authors:
+ * Alexander Popov <alex.popov@linux.com>
+ * Tycho Andersen <tycho@tycho.ws>
+ */
+
+#include "lkdtm.h"
+#include <linux/kstack_erase.h>
+
+#if defined(CONFIG_KSTACK_ERASE)
+/*
+ * Check that stackleak tracks the lowest stack pointer and erases the stack
+ * below this as expected.
+ *
+ * To prevent the lowest stack pointer changing during the test, IRQs are
+ * masked and instrumentation of this function is disabled. We assume that the
+ * compiler will create a fixed-size stack frame for this function.
+ *
+ * Any non-inlined function may make further use of the stack, altering the
+ * lowest stack pointer and/or clobbering poison values. To avoid spurious
+ * failures we must avoid printing until the end of the test or have already
+ * encountered a failure condition.
+ */
+static void noinstr check_stackleak_irqoff(void)
+{
+ const unsigned long task_stack_base = (unsigned long)task_stack_page(current);
+ const unsigned long task_stack_low = stackleak_task_low_bound(current);
+ const unsigned long task_stack_high = stackleak_task_high_bound(current);
+ const unsigned long current_sp = current_stack_pointer;
+ const unsigned long lowest_sp = current->lowest_stack;
+ unsigned long untracked_high;
+ unsigned long poison_high, poison_low;
+ bool test_failed = false;
+
+ /*
+ * Check that the current and lowest recorded stack pointer values fall
+ * within the expected task stack boundaries. These tests should never
+ * fail unless the boundaries are incorrect or we're clobbering the
+	 * STACK_END_MAGIC, and in either case something is seriously wrong.
+ */
+ if (current_sp < task_stack_low || current_sp >= task_stack_high) {
+ instrumentation_begin();
+ pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
+ current_sp, task_stack_low, task_stack_high - 1);
+ test_failed = true;
+ goto out;
+ }
+ if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) {
+ instrumentation_begin();
+ pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
+ lowest_sp, task_stack_low, task_stack_high - 1);
+ test_failed = true;
+ goto out;
+ }
+
+ /*
+ * Depending on what has run prior to this test, the lowest recorded
+ * stack pointer could be above or below the current stack pointer.
+ * Start from the lowest of the two.
+ *
+ * Poison values are naturally-aligned unsigned longs. As the current
+ * stack pointer might not be sufficiently aligned, we must align
+ * downwards to find the lowest known stack pointer value. This is the
+ * high boundary for a portion of the stack which may have been used
+ * without being tracked, and has to be scanned for poison.
+ */
+ untracked_high = min(current_sp, lowest_sp);
+ untracked_high = ALIGN_DOWN(untracked_high, sizeof(unsigned long));
+
+ /*
+ * Find the top of the poison in the same way as the erasing code.
+ */
+ poison_high = stackleak_find_top_of_poison(task_stack_low, untracked_high);
+
+ /*
+ * Check whether the poisoned portion of the stack (if any) consists
+ * entirely of poison. This verifies the entries that
+ * stackleak_find_top_of_poison() should have checked.
+ */
+ poison_low = poison_high;
+ while (poison_low > task_stack_low) {
+ poison_low -= sizeof(unsigned long);
+
+ if (*(unsigned long *)poison_low == KSTACK_ERASE_POISON)
+ continue;
+
+ instrumentation_begin();
+ pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n",
+ poison_high - poison_low, *(unsigned long *)poison_low);
+ test_failed = true;
+ goto out;
+ }
+
+ instrumentation_begin();
+ pr_info("kstack erase stack usage:\n"
+ " high offset: %lu bytes\n"
+ " current: %lu bytes\n"
+ " lowest: %lu bytes\n"
+ " tracked: %lu bytes\n"
+ " untracked: %lu bytes\n"
+ " poisoned: %lu bytes\n"
+ " low offset: %lu bytes\n",
+ task_stack_base + THREAD_SIZE - task_stack_high,
+ task_stack_high - current_sp,
+ task_stack_high - lowest_sp,
+ task_stack_high - untracked_high,
+ untracked_high - poison_high,
+ poison_high - task_stack_low,
+ task_stack_low - task_stack_base);
+
+out:
+ if (test_failed) {
+ pr_err("FAIL: the thread stack is NOT properly erased!\n");
+ } else {
+ pr_info("OK: the rest of the thread stack is properly erased\n");
+ }
+ instrumentation_end();
+}
+
+static void lkdtm_KSTACK_ERASE(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ check_stackleak_irqoff();
+ local_irq_restore(flags);
+}
+#else /* defined(CONFIG_KSTACK_ERASE) */
+static void lkdtm_KSTACK_ERASE(void)
+{
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_KSTACK_ERASE)) {
+ pr_err("XFAIL: stackleak is not enabled (CONFIG_KSTACK_ERASE=n)\n");
+ } else {
+ pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_KSTACK_ERASE=n)\n");
+ }
+}
+#endif /* defined(CONFIG_KSTACK_ERASE) */
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(KSTACK_ERASE),
+};
+
+struct crashtype_category stackleak_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
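
The renamed test keeps LKDTM's usual debugfs trigger; a sketch of exercising it from
userspace (assuming CONFIG_LKDTM=y and debugfs mounted at /sys/kernel/debug; the
result is reported in the kernel log)::

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/debug/provoke-crash/DIRECT", "w");

            if (!f) {
                    perror("LKDTM trigger not available");
                    return 1;
            }
            /* The crash type name matches CRASHTYPE(KSTACK_ERASE) above. */
            fputs("KSTACK_ERASE\n", f);
            fclose(f);
            return 0;
    }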
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This code tests that the current task stack is properly erased (filled
- * with STACKLEAK_POISON).
- *
- * Authors:
- * Alexander Popov <alex.popov@linux.com>
- * Tycho Andersen <tycho@tycho.ws>
- */
-
-#include "lkdtm.h"
-#include <linux/stackleak.h>
-
-#if defined(CONFIG_GCC_PLUGIN_STACKLEAK)
-/*
- * Check that stackleak tracks the lowest stack pointer and erases the stack
- * below this as expected.
- *
- * To prevent the lowest stack pointer changing during the test, IRQs are
- * masked and instrumentation of this function is disabled. We assume that the
- * compiler will create a fixed-size stack frame for this function.
- *
- * Any non-inlined function may make further use of the stack, altering the
- * lowest stack pointer and/or clobbering poison values. To avoid spurious
- * failures we must avoid printing until the end of the test or have already
- * encountered a failure condition.
- */
-static void noinstr check_stackleak_irqoff(void)
-{
- const unsigned long task_stack_base = (unsigned long)task_stack_page(current);
- const unsigned long task_stack_low = stackleak_task_low_bound(current);
- const unsigned long task_stack_high = stackleak_task_high_bound(current);
- const unsigned long current_sp = current_stack_pointer;
- const unsigned long lowest_sp = current->lowest_stack;
- unsigned long untracked_high;
- unsigned long poison_high, poison_low;
- bool test_failed = false;
-
- /*
- * Check that the current and lowest recorded stack pointer values fall
- * within the expected task stack boundaries. These tests should never
- * fail unless the boundaries are incorrect or we're clobbering the
- * STACK_END_MAGIC, and in either casee something is seriously wrong.
- */
- if (current_sp < task_stack_low || current_sp >= task_stack_high) {
- instrumentation_begin();
- pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
- current_sp, task_stack_low, task_stack_high - 1);
- test_failed = true;
- goto out;
- }
- if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) {
- instrumentation_begin();
- pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
- lowest_sp, task_stack_low, task_stack_high - 1);
- test_failed = true;
- goto out;
- }
-
- /*
- * Depending on what has run prior to this test, the lowest recorded
- * stack pointer could be above or below the current stack pointer.
- * Start from the lowest of the two.
- *
- * Poison values are naturally-aligned unsigned longs. As the current
- * stack pointer might not be sufficiently aligned, we must align
- * downwards to find the lowest known stack pointer value. This is the
- * high boundary for a portion of the stack which may have been used
- * without being tracked, and has to be scanned for poison.
- */
- untracked_high = min(current_sp, lowest_sp);
- untracked_high = ALIGN_DOWN(untracked_high, sizeof(unsigned long));
-
- /*
- * Find the top of the poison in the same way as the erasing code.
- */
- poison_high = stackleak_find_top_of_poison(task_stack_low, untracked_high);
-
- /*
- * Check whether the poisoned portion of the stack (if any) consists
- * entirely of poison. This verifies the entries that
- * stackleak_find_top_of_poison() should have checked.
- */
- poison_low = poison_high;
- while (poison_low > task_stack_low) {
- poison_low -= sizeof(unsigned long);
-
- if (*(unsigned long *)poison_low == STACKLEAK_POISON)
- continue;
-
- instrumentation_begin();
- pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n",
- poison_high - poison_low, *(unsigned long *)poison_low);
- test_failed = true;
- goto out;
- }
-
- instrumentation_begin();
- pr_info("stackleak stack usage:\n"
- " high offset: %lu bytes\n"
- " current: %lu bytes\n"
- " lowest: %lu bytes\n"
- " tracked: %lu bytes\n"
- " untracked: %lu bytes\n"
- " poisoned: %lu bytes\n"
- " low offset: %lu bytes\n",
- task_stack_base + THREAD_SIZE - task_stack_high,
- task_stack_high - current_sp,
- task_stack_high - lowest_sp,
- task_stack_high - untracked_high,
- untracked_high - poison_high,
- poison_high - task_stack_low,
- task_stack_low - task_stack_base);
-
-out:
- if (test_failed) {
- pr_err("FAIL: the thread stack is NOT properly erased!\n");
- } else {
- pr_info("OK: the rest of the thread stack is properly erased\n");
- }
- instrumentation_end();
-}
-
-static void lkdtm_STACKLEAK_ERASING(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- check_stackleak_irqoff();
- local_irq_restore(flags);
-}
-#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
-static void lkdtm_STACKLEAK_ERASING(void)
-{
- if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
- pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
- } else {
- pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_STACKLEAK=n)\n");
- }
-}
-#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
-
-static struct crashtype crashtypes[] = {
- CRASHTYPE(STACKLEAK_ERASING),
-};
-
-struct crashtype_category stackleak_crashtypes = {
- .crashtypes = crashtypes,
- .len = ARRAY_SIZE(crashtypes),
-};
}
#endif /* CONFIG_KSM */
-#ifdef CONFIG_STACKLEAK_METRICS
+#ifdef CONFIG_KSTACK_ERASE_METRICS
static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
prev_depth, depth);
return 0;
}
-#endif /* CONFIG_STACKLEAK_METRICS */
+#endif /* CONFIG_KSTACK_ERASE_METRICS */
/*
* Thread groups
#ifdef CONFIG_LIVEPATCH
ONE("patch_state", S_IRUSR, proc_pid_patch_state),
#endif
-#ifdef CONFIG_STACKLEAK_METRICS
+#ifdef CONFIG_KSTACK_ERASE_METRICS
ONE("stack_depth", S_IRUGO, proc_stack_depth),
#endif
#ifdef CONFIG_PROC_PID_ARCH_STATUS
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTACK_ERASE_H
+#define _LINUX_KSTACK_ERASE_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/*
+ * Check that the poison value points to the unused hole in the
+ * virtual memory map for your platform.
+ */
+#define KSTACK_ERASE_POISON -0xBEEF
+#define KSTACK_ERASE_SEARCH_DEPTH 128
+
+#ifdef CONFIG_KSTACK_ERASE
+#include <asm/stacktrace.h>
+#include <linux/linkage.h>
+
+/*
+ * The lowest address on tsk's stack which we can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_low_bound(const struct task_struct *tsk)
+{
+ /*
+ * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
+ * which we must not corrupt.
+ */
+ return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
+}
+
+/*
+ * The address immediately after the highest address on tsk's stack which we
+ * can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_high_bound(const struct task_struct *tsk)
+{
+ /*
+ * The task's pt_regs lives at the top of the task stack and will be
+ * overwritten by exception entry, so there's no need to erase them.
+ */
+ return (unsigned long)task_pt_regs(tsk);
+}
+
+/*
+ * Find the address immediately above the poisoned region of the stack, where
+ * that region falls between 'low' (inclusive) and 'high' (exclusive).
+ */
+static __always_inline unsigned long
+stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
+{
+ const unsigned int depth = KSTACK_ERASE_SEARCH_DEPTH / sizeof(unsigned long);
+ unsigned int poison_count = 0;
+ unsigned long poison_high = high;
+ unsigned long sp = high;
+
+ while (sp > low && poison_count < depth) {
+ sp -= sizeof(unsigned long);
+
+ if (*(unsigned long *)sp == KSTACK_ERASE_POISON) {
+ poison_count++;
+ } else {
+ poison_count = 0;
+ poison_high = sp;
+ }
+ }
+
+ return poison_high;
+}
+
+static inline void stackleak_task_init(struct task_struct *t)
+{
+ t->lowest_stack = stackleak_task_low_bound(t);
+# ifdef CONFIG_KSTACK_ERASE_METRICS
+ t->prev_lowest_stack = t->lowest_stack;
+# endif
+}
+
+asmlinkage void noinstr stackleak_erase(void);
+asmlinkage void noinstr stackleak_erase_on_task_stack(void);
+asmlinkage void noinstr stackleak_erase_off_task_stack(void);
+void __no_caller_saved_registers noinstr stackleak_track_stack(void);
+
+#else /* !CONFIG_KSTACK_ERASE */
+static inline void stackleak_task_init(struct task_struct *t) { }
+#endif
+
+#endif
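
A side note on the poison value kept by the new header: on a 64-bit machine, -0xBEEF
widened to an unsigned long is 0xffffffffffff4111, which is exactly the unused
memory-map hole referenced in the x86_64 mm documentation hunk above. A trivial check::

    #include <stdio.h>

    int main(void)
    {
            /* Same arithmetic as KSTACK_ERASE_POISON (-0xBEEF) as a pointer-sized word. */
            unsigned long poison = -0xBEEFUL;

            printf("poison word: 0x%lx\n", poison); /* 0xffffffffffff4111 on LP64 */
            return 0;
    }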
/* Used by BPF for per-TASK xdp storage */
struct bpf_net_context *bpf_net_context;
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
unsigned long lowest_stack;
+#endif
+#ifdef CONFIG_KSTACK_ERASE_METRICS
unsigned long prev_lowest_stack;
#endif
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_STACKLEAK_H
-#define _LINUX_STACKLEAK_H
-
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-
-/*
- * Check that the poison value points to the unused hole in the
- * virtual memory map for your platform.
- */
-#define STACKLEAK_POISON -0xBEEF
-#define STACKLEAK_SEARCH_DEPTH 128
-
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-#include <asm/stacktrace.h>
-#include <linux/linkage.h>
-
-/*
- * The lowest address on tsk's stack which we can plausibly erase.
- */
-static __always_inline unsigned long
-stackleak_task_low_bound(const struct task_struct *tsk)
-{
- /*
- * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
- * which we must not corrupt.
- */
- return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
-}
-
-/*
- * The address immediately after the highest address on tsk's stack which we
- * can plausibly erase.
- */
-static __always_inline unsigned long
-stackleak_task_high_bound(const struct task_struct *tsk)
-{
- /*
- * The task's pt_regs lives at the top of the task stack and will be
- * overwritten by exception entry, so there's no need to erase them.
- */
- return (unsigned long)task_pt_regs(tsk);
-}
-
-/*
- * Find the address immediately above the poisoned region of the stack, where
- * that region falls between 'low' (inclusive) and 'high' (exclusive).
- */
-static __always_inline unsigned long
-stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
-{
- const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
- unsigned int poison_count = 0;
- unsigned long poison_high = high;
- unsigned long sp = high;
-
- while (sp > low && poison_count < depth) {
- sp -= sizeof(unsigned long);
-
- if (*(unsigned long *)sp == STACKLEAK_POISON) {
- poison_count++;
- } else {
- poison_count = 0;
- poison_high = sp;
- }
- }
-
- return poison_high;
-}
-
-static inline void stackleak_task_init(struct task_struct *t)
-{
- t->lowest_stack = stackleak_task_low_bound(t);
-# ifdef CONFIG_STACKLEAK_METRICS
- t->prev_lowest_stack = t->lowest_stack;
-# endif
-}
-
-asmlinkage void noinstr stackleak_erase(void);
-asmlinkage void noinstr stackleak_erase_on_task_stack(void);
-asmlinkage void noinstr stackleak_erase_off_task_stack(void);
-void __no_caller_saved_registers noinstr stackleak_track_stack(void);
-
-#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
-static inline void stackleak_task_init(struct task_struct *t) { }
-#endif
-
-#endif
obj-$(CONFIG_RESOURCE_KUNIT_TEST) += resource_kunit.o
obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o
-CFLAGS_stackleak.o += $(DISABLE_STACKLEAK_PLUGIN)
-obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
-KASAN_SANITIZE_stackleak.o := n
-KCSAN_SANITIZE_stackleak.o := n
-KCOV_INSTRUMENT_stackleak.o := n
+CFLAGS_kstack_erase.o += $(DISABLE_KSTACK_ERASE)
+obj-$(CONFIG_KSTACK_ERASE) += kstack_erase.o
+KASAN_SANITIZE_kstack_erase.o := n
+KCSAN_SANITIZE_kstack_erase.o := n
+KCOV_INSTRUMENT_kstack_erase.o := n
obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
-#include <linux/stackleak.h>
+#include <linux/kstack_erase.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code fills the used part of the kernel stack with a poison value
+ * before returning to userspace. It's part of the STACKLEAK feature
+ * ported from grsecurity/PaX.
+ *
+ * Author: Alexander Popov <alex.popov@linux.com>
+ *
+ * KSTACK_ERASE reduces the information which kernel stack leak bugs can
+ * reveal and blocks some uninitialized stack variable attacks.
+ */
+
+#include <linux/kstack_erase.h>
+#include <linux/kprobes.h>
+
+#ifdef CONFIG_KSTACK_ERASE_RUNTIME_DISABLE
+#include <linux/jump_label.h>
+#include <linux/string_choices.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+
+static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
+
+#ifdef CONFIG_SYSCTL
+static int stack_erasing_sysctl(const struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret = 0;
+ int state = !static_branch_unlikely(&stack_erasing_bypass);
+ int prev_state = state;
+ struct ctl_table table_copy = *table;
+
+ table_copy.data = &state;
+ ret = proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
+ state = !!state;
+ if (ret || !write || state == prev_state)
+ return ret;
+
+ if (state)
+ static_branch_disable(&stack_erasing_bypass);
+ else
+ static_branch_enable(&stack_erasing_bypass);
+
+ pr_warn("stackleak: kernel stack erasing is %s\n",
+ str_enabled_disabled(state));
+ return ret;
+}
+static const struct ctl_table stackleak_sysctls[] = {
+ {
+ .procname = "stack_erasing",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = stack_erasing_sysctl,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+};
+
+static int __init stackleak_sysctls_init(void)
+{
+ register_sysctl_init("kernel", stackleak_sysctls);
+ return 0;
+}
+late_initcall(stackleak_sysctls_init);
+#endif /* CONFIG_SYSCTL */
+
+#define skip_erasing() static_branch_unlikely(&stack_erasing_bypass)
+#else
+#define skip_erasing() false
+#endif /* CONFIG_KSTACK_ERASE_RUNTIME_DISABLE */
+
+#ifndef __stackleak_poison
+static __always_inline void __stackleak_poison(unsigned long erase_low,
+ unsigned long erase_high,
+ unsigned long poison)
+{
+ while (erase_low < erase_high) {
+ *(unsigned long *)erase_low = poison;
+ erase_low += sizeof(unsigned long);
+ }
+}
+#endif
+
+static __always_inline void __stackleak_erase(bool on_task_stack)
+{
+ const unsigned long task_stack_low = stackleak_task_low_bound(current);
+ const unsigned long task_stack_high = stackleak_task_high_bound(current);
+ unsigned long erase_low, erase_high;
+
+ erase_low = stackleak_find_top_of_poison(task_stack_low,
+ current->lowest_stack);
+
+#ifdef CONFIG_KSTACK_ERASE_METRICS
+ current->prev_lowest_stack = erase_low;
+#endif
+
+ /*
+ * Write poison to the task's stack between 'erase_low' and
+ * 'erase_high'.
+ *
+ * If we're running on a different stack (e.g. an entry trampoline
+ * stack) we can erase everything below the pt_regs at the top of the
+ * task stack.
+ *
+ * If we're running on the task stack itself, we must not clobber any
+ * stack used by this function and its caller. We assume that this
+ * function has a fixed-size stack frame, and the current stack pointer
+ * doesn't change while we write poison.
+ */
+ if (on_task_stack)
+ erase_high = current_stack_pointer;
+ else
+ erase_high = task_stack_high;
+
+ __stackleak_poison(erase_low, erase_high, KSTACK_ERASE_POISON);
+
+ /* Reset the 'lowest_stack' value for the next syscall */
+ current->lowest_stack = task_stack_high;
+}
+
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can be called from the task stack or an entry stack when the task stack is
+ * no longer in use.
+ */
+asmlinkage void noinstr stackleak_erase(void)
+{
+ if (skip_erasing())
+ return;
+
+ __stackleak_erase(on_thread_stack());
+}
+
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can only be called from the task stack.
+ */
+asmlinkage void noinstr stackleak_erase_on_task_stack(void)
+{
+ if (skip_erasing())
+ return;
+
+ __stackleak_erase(true);
+}
+
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can only be called from a stack other than the task stack.
+ */
+asmlinkage void noinstr stackleak_erase_off_task_stack(void)
+{
+ if (skip_erasing())
+ return;
+
+ __stackleak_erase(false);
+}
+
+void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
+{
+ unsigned long sp = current_stack_pointer;
+
+ /*
+ * Having CONFIG_KSTACK_ERASE_TRACK_MIN_SIZE larger than
+ * KSTACK_ERASE_SEARCH_DEPTH makes the poison search in
+ * stackleak_erase() unreliable. Let's prevent that.
+ */
+ BUILD_BUG_ON(CONFIG_KSTACK_ERASE_TRACK_MIN_SIZE > KSTACK_ERASE_SEARCH_DEPTH);
+
+ /* 'lowest_stack' should be aligned on the register width boundary */
+ sp = ALIGN(sp, sizeof(unsigned long));
+ if (sp < current->lowest_stack &&
+ sp >= stackleak_task_low_bound(current)) {
+ current->lowest_stack = sp;
+ }
+}
+EXPORT_SYMBOL(stackleak_track_stack);
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This code fills the used part of the kernel stack with a poison value
- * before returning to userspace. It's part of the STACKLEAK feature
- * ported from grsecurity/PaX.
- *
- * Author: Alexander Popov <alex.popov@linux.com>
- *
- * STACKLEAK reduces the information which kernel stack leak bugs can
- * reveal and blocks some uninitialized stack variable attacks.
- */
-
-#include <linux/stackleak.h>
-#include <linux/kprobes.h>
-
-#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
-#include <linux/jump_label.h>
-#include <linux/string_choices.h>
-#include <linux/sysctl.h>
-#include <linux/init.h>
-
-static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
-
-#ifdef CONFIG_SYSCTL
-static int stack_erasing_sysctl(const struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int ret = 0;
- int state = !static_branch_unlikely(&stack_erasing_bypass);
- int prev_state = state;
- struct ctl_table table_copy = *table;
-
- table_copy.data = &state;
- ret = proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
- state = !!state;
- if (ret || !write || state == prev_state)
- return ret;
-
- if (state)
- static_branch_disable(&stack_erasing_bypass);
- else
- static_branch_enable(&stack_erasing_bypass);
-
- pr_warn("stackleak: kernel stack erasing is %s\n",
- str_enabled_disabled(state));
- return ret;
-}
-static const struct ctl_table stackleak_sysctls[] = {
- {
- .procname = "stack_erasing",
- .data = NULL,
- .maxlen = sizeof(int),
- .mode = 0600,
- .proc_handler = stack_erasing_sysctl,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
-};
-
-static int __init stackleak_sysctls_init(void)
-{
- register_sysctl_init("kernel", stackleak_sysctls);
- return 0;
-}
-late_initcall(stackleak_sysctls_init);
-#endif /* CONFIG_SYSCTL */
-
-#define skip_erasing() static_branch_unlikely(&stack_erasing_bypass)
-#else
-#define skip_erasing() false
-#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
-
-#ifndef __stackleak_poison
-static __always_inline void __stackleak_poison(unsigned long erase_low,
- unsigned long erase_high,
- unsigned long poison)
-{
- while (erase_low < erase_high) {
- *(unsigned long *)erase_low = poison;
- erase_low += sizeof(unsigned long);
- }
-}
-#endif
-
-static __always_inline void __stackleak_erase(bool on_task_stack)
-{
- const unsigned long task_stack_low = stackleak_task_low_bound(current);
- const unsigned long task_stack_high = stackleak_task_high_bound(current);
- unsigned long erase_low, erase_high;
-
- erase_low = stackleak_find_top_of_poison(task_stack_low,
- current->lowest_stack);
-
-#ifdef CONFIG_STACKLEAK_METRICS
- current->prev_lowest_stack = erase_low;
-#endif
-
- /*
- * Write poison to the task's stack between 'erase_low' and
- * 'erase_high'.
- *
- * If we're running on a different stack (e.g. an entry trampoline
- * stack) we can erase everything below the pt_regs at the top of the
- * task stack.
- *
- * If we're running on the task stack itself, we must not clobber any
- * stack used by this function and its caller. We assume that this
- * function has a fixed-size stack frame, and the current stack pointer
- * doesn't change while we write poison.
- */
- if (on_task_stack)
- erase_high = current_stack_pointer;
- else
- erase_high = task_stack_high;
-
- __stackleak_poison(erase_low, erase_high, STACKLEAK_POISON);
-
- /* Reset the 'lowest_stack' value for the next syscall */
- current->lowest_stack = task_stack_high;
-}
-
-/*
- * Erase and poison the portion of the task stack used since the last erase.
- * Can be called from the task stack or an entry stack when the task stack is
- * no longer in use.
- */
-asmlinkage void noinstr stackleak_erase(void)
-{
- if (skip_erasing())
- return;
-
- __stackleak_erase(on_thread_stack());
-}
-
-/*
- * Erase and poison the portion of the task stack used since the last erase.
- * Can only be called from the task stack.
- */
-asmlinkage void noinstr stackleak_erase_on_task_stack(void)
-{
- if (skip_erasing())
- return;
-
- __stackleak_erase(true);
-}
-
-/*
- * Erase and poison the portion of the task stack used since the last erase.
- * Can only be called from a stack other than the task stack.
- */
-asmlinkage void noinstr stackleak_erase_off_task_stack(void)
-{
- if (skip_erasing())
- return;
-
- __stackleak_erase(false);
-}
-
-void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
-{
- unsigned long sp = current_stack_pointer;
-
- /*
- * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
- * STACKLEAK_SEARCH_DEPTH makes the poison search in
- * stackleak_erase() unreliable. Let's prevent that.
- */
- BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);
-
- /* 'lowest_stack' should be aligned on the register width boundary */
- sp = ALIGN(sp, sizeof(unsigned long));
- if (sp < current->lowest_stack &&
- sp >= stackleak_task_low_bound(current)) {
- current->lowest_stack = sp;
- }
-}
-EXPORT_SYMBOL(stackleak_track_stack);
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
KCSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := -fno-stack-protector $(DISABLE_STACKLEAK_PLUGIN)
+CFLAGS_ubsan.o := -fno-stack-protector $(DISABLE_KSTACK_ERASE)
obj-$(CONFIG_SBITMAP) += sbitmap.o
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
+= -DSTACKLEAK_PLUGIN
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
- += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE)
+ += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_KSTACK_ERASE_TRACK_MIN_SIZE)
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK) \
+= -fplugin-arg-stackleak_plugin-arch=$(SRCARCH)
gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK_VERBOSE) \
+= -fplugin-arg-stackleak_plugin-verbose
ifdef CONFIG_GCC_PLUGIN_STACKLEAK
- DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable
+ DISABLE_KSTACK_ERASE += -fplugin-arg-stackleak_plugin-disable
endif
-export DISABLE_STACKLEAK_PLUGIN
+export DISABLE_KSTACK_ERASE
# All the plugin CFLAGS are collected here in case a build target needs to
# filter them out of the KBUILD_CFLAGS.
endchoice
-config GCC_PLUGIN_STACKLEAK
+config KSTACK_ERASE
bool "Poison kernel stack before returning from syscalls"
+ depends on HAVE_ARCH_KSTACK_ERASE
depends on GCC_PLUGINS
- depends on HAVE_ARCH_STACKLEAK
help
This option makes the kernel erase the kernel stack before
returning from system calls. This has the effect of leaving
are advised to test this feature on your expected workload before
deploying it.
+config GCC_PLUGIN_STACKLEAK
+ def_bool KSTACK_ERASE
+ depends on GCC_PLUGINS
+ help
This plugin was ported from grsecurity/PaX. More information at:
* https://grsecurity.net/
* https://pax.grsecurity.net/
instrumented. This is useful for comparing coverage between
builds.
-config STACKLEAK_TRACK_MIN_SIZE
- int "Minimum stack frame size of functions tracked by STACKLEAK"
+config KSTACK_ERASE_TRACK_MIN_SIZE
+ int "Minimum stack frame size of functions tracked by KSTACK_ERASE"
default 100
range 0 4096
- depends on GCC_PLUGIN_STACKLEAK
+ depends on KSTACK_ERASE
help
- The STACKLEAK gcc plugin instruments the kernel code for tracking
+ The KSTACK_ERASE option instruments the kernel code for tracking
the lowest border of the kernel stack (and for some other purposes).
It inserts the stackleak_track_stack() call for the functions with
a stack frame size greater than or equal to this parameter.
If unsure, leave the default value 100.
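
Conceptually, the instrumentation that this threshold controls looks like the sketch
below (not literal plugin output; the function name is made up, and it assumes
CONFIG_KSTACK_ERASE=y so that stackleak_track_stack() is declared)::

    #include <linux/kstack_erase.h>
    #include <linux/string.h>

    /* Any function whose stack frame is at least CONFIG_KSTACK_ERASE_TRACK_MIN_SIZE
     * bytes gets a stackleak_track_stack() call inserted by the plugin, which
     * lowers current->lowest_stack when the stack pointer has gone deeper. */
    static long example_tracked_function(void)
    {
            char big_local[256];            /* >= 100-byte frame, so it is tracked */

            stackleak_track_stack();        /* inserted automatically by the plugin */
            memset(big_local, 0, sizeof(big_local));
            return big_local[0];
    }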
-config STACKLEAK_METRICS
- bool "Show STACKLEAK metrics in the /proc file system"
- depends on GCC_PLUGIN_STACKLEAK
+config KSTACK_ERASE_METRICS
+ bool "Show KSTACK_ERASE metrics in the /proc file system"
+ depends on KSTACK_ERASE
depends on PROC_FS
help
- If this is set, STACKLEAK metrics for every task are available in
- the /proc file system. In particular, /proc/<pid>/stack_depth
+ If this is set, KSTACK_ERASE metrics for every task are available
+ in the /proc file system. In particular, /proc/<pid>/stack_depth
shows the maximum kernel stack consumption for the current and
previous syscalls. Although this information is not precise, it
- can be useful for estimating the STACKLEAK performance impact for
- your workloads.
+ can be useful for estimating the KSTACK_ERASE performance impact
+ for your workloads.
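
A quick way to look at these metrics from userspace (a sketch, assuming
CONFIG_KSTACK_ERASE_METRICS=y and procfs mounted)::

    #include <stdio.h>

    int main(void)
    {
            char line[128];
            FILE *f = fopen("/proc/self/stack_depth", "r");

            if (!f) {
                    perror("stack_depth not available");
                    return 1;
            }
            /* Reports previous and current syscall stack depth for this task. */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }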
-config STACKLEAK_RUNTIME_DISABLE
+config KSTACK_ERASE_RUNTIME_DISABLE
bool "Allow runtime disabling of kernel stack erasing"
- depends on GCC_PLUGIN_STACKLEAK
+ depends on KSTACK_ERASE
help
This option provides 'stack_erasing' sysctl, which can be used in
runtime to control kernel stack erasing for kernels built with
- CONFIG_GCC_PLUGIN_STACKLEAK.
+ CONFIG_KSTACK_ERASE.
config INIT_ON_ALLOC_DEFAULT_ON
bool "Enable heap memory zeroing on allocation by default"
"__ubsan_handle_type_mismatch_v1",
"__ubsan_handle_shift_out_of_bounds",
"__ubsan_handle_load_invalid_value",
- /* STACKLEAK */
+ /* KSTACK_ERASE */
"stackleak_track_stack",
/* TRACE_BRANCH_PROFILING */
"ftrace_likely_update",
CONFIG_DEBUG_LIST=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_FORTIFY_SOURCE=y
-CONFIG_GCC_PLUGIN_STACKLEAK=y
+CONFIG_KSTACK_ERASE=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
CONFIG_INIT_ON_FREE_DEFAULT_ON=y