stackleak: allow to specify arch specific stackleak poison function
Author: Heiko Carstens <hca@linux.ibm.com>
Wed, 5 Apr 2023 13:08:40 +0000 (15:08 +0200)
Committer: Vasily Gorbik <gor@linux.ibm.com>
Thu, 20 Apr 2023 09:36:35 +0000 (11:36 +0200)
Factor out the code that fills the stack with the stackleak poison value
in order to allow architectures to provide a faster implementation.

Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20230405130841.1350565-2-hca@linux.ibm.com
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
kernel/stackleak.c

index c2c33d2202e9a16546e5cd6b49aeccdc116cd218..34c9d81eea9409410fb289f19108bb7fb8488810 100644 (file)
@@ -70,6 +70,18 @@ late_initcall(stackleak_sysctls_init);
 #define skip_erasing() false
 #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
 
+#ifndef __stackleak_poison
+static __always_inline void __stackleak_poison(unsigned long erase_low,
+                                              unsigned long erase_high,
+                                              unsigned long poison)
+{
+       while (erase_low < erase_high) {
+               *(unsigned long *)erase_low = poison;
+               erase_low += sizeof(unsigned long);
+       }
+}
+#endif
+
 static __always_inline void __stackleak_erase(bool on_task_stack)
 {
        const unsigned long task_stack_low = stackleak_task_low_bound(current);
@@ -101,10 +113,7 @@ static __always_inline void __stackleak_erase(bool on_task_stack)
        else
                erase_high = task_stack_high;
 
-       while (erase_low < erase_high) {
-               *(unsigned long *)erase_low = STACKLEAK_POISON;
-               erase_low += sizeof(unsigned long);
-       }
+       __stackleak_poison(erase_low, erase_high, STACKLEAK_POISON);
 
        /* Reset the 'lowest_stack' value for the next syscall */
        current->lowest_stack = task_stack_high;