task_stack: uninline stack_not_used
author     Pasha Tatashin <pasha.tatashin@soleen.com>
           Wed, 24 Jul 2024 20:33:22 +0000 (20:33 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 2 Sep 2024 03:25:49 +0000 (20:25 -0700)
Given that stack_not_used() is not a performance-critical function,
uninline it.
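
For reference, a minimal userspace sketch of the scan that stack_not_used()
performs for a downward-growing stack (the !CONFIG_STACK_GROWSUP case) is
included below; toy_stack, STACK_WORDS and toy_stack_not_used() are
illustrative names only, not kernel symbols:

  /*
   * Toy userspace model (not kernel code) of the scan done by
   * stack_not_used(): walk up from the lowest stack address past the
   * words that are still zero, i.e. were never written.
   */
  #include <stdio.h>
  #include <string.h>

  #define STACK_WORDS 256         /* toy stack size, in unsigned longs */

  /* the lowest address plays the role of end_of_stack() */
  static unsigned long toy_stack[STACK_WORDS];

  static unsigned long toy_stack_not_used(void)
  {
          unsigned long *n = toy_stack;

          do {    /* skip words that were never written (still zero) */
                  n++;
          } while (!*n);

          /* distance from the stack end to the deepest touched word */
          return (unsigned long)n - (unsigned long)toy_stack;
  }

  int main(void)
  {
          /* pretend the "task" dirtied the top 64 words of its stack */
          memset(&toy_stack[STACK_WORDS - 64], 0xaa,
                 64 * sizeof(unsigned long));

          printf("%lu bytes at the stack end were never used\n",
                 toy_stack_not_used());
          return 0;
  }

As in the kernel version, the loop relies on at least one non-zero word
existing near the top of the stack (provided in the kernel by the task's
initial stack frame), otherwise the scan would run past the buffer.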

Link: https://lkml.kernel.org/r/20240730150158.832783-4-pasha.tatashin@soleen.com
Link: https://lkml.kernel.org/r/20240724203322.2765486-4-pasha.tatashin@soleen.com
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Li Zhijian <lizhijian@fujitsu.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/sched/task_stack.h
kernel/exit.c
kernel/sched/core.c

diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index ccd72b978e1fc7104ea7a7279513eb12a5feadcd..bf10bdb487ddc4e73968d3f69ee77b1a80678114 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -95,23 +95,11 @@ static inline int object_is_on_stack(const void *obj)
 extern void thread_stack_cache_init(void);
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p);
+#else
 static inline unsigned long stack_not_used(struct task_struct *p)
 {
-       unsigned long *n = end_of_stack(p);
-
-       do {    /* Skip over canary */
-# ifdef CONFIG_STACK_GROWSUP
-               n--;
-# else
-               n++;
-# endif
-       } while (!*n);
-
-# ifdef CONFIG_STACK_GROWSUP
-       return (unsigned long)end_of_stack(p) - (unsigned long)n;
-# else
-       return (unsigned long)n - (unsigned long)end_of_stack(p);
-# endif
+       return 0;
 }
 #endif
 extern void set_task_stack_end_magic(struct task_struct *tsk);
diff --git a/kernel/exit.c b/kernel/exit.c
index 64bfc2bae55ba00a571c9afccf385940bbd5347e..45085a0e7c165530355cb9d819d6f6d0ae755d39 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -778,6 +778,25 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 }
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p)
+{
+       unsigned long *n = end_of_stack(p);
+
+       do {    /* Skip over canary */
+# ifdef CONFIG_STACK_GROWSUP
+               n--;
+# else
+               n++;
+# endif
+       } while (!*n);
+
+# ifdef CONFIG_STACK_GROWSUP
+       return (unsigned long)end_of_stack(p) - (unsigned long)n;
+# else
+       return (unsigned long)n - (unsigned long)end_of_stack(p);
+# endif
+}
+
 /* Count the maximum pages reached in kernel stacks */
 static inline void kstack_histogram(unsigned long used_stack)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f3951e4a55e5b6078142f2f2b44359b7560e6e4f..43e701f5401301d93d6ebac34eb86098cfa1dca5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7405,7 +7405,7 @@ EXPORT_SYMBOL(io_schedule);
 
 void sched_show_task(struct task_struct *p)
 {
-       unsigned long free = 0;
+       unsigned long free;
        int ppid;
 
        if (!try_get_task_stack(p))
@@ -7415,9 +7415,7 @@ void sched_show_task(struct task_struct *p)
 
        if (task_is_running(p))
                pr_cont("  running task    ");
-#ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
-#endif
        ppid = 0;
        rcu_read_lock();
        if (pid_alive(p))