x86/entry/64: Separate cpu_current_top_of_stack from TSS.sp0
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index bd6b85fac66696da70e316656ad6f0d51291f8aa..86e83762e3b3789f4582d4510214181750b5b8a5 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/errno.h>
  */
 __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
        .x86_tss = {
-               .sp0 = TOP_OF_INIT_STACK,
+               /*
+                * .sp0 is only used when entering ring 0 from a lower
+                * privilege level.  Since the init task never runs anything
+                * but ring 0 code, there is no need for a valid value here.
+                * Poison it.
+                */
+               .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+               /*
+                * .sp1 is cpu_current_top_of_stack.  The init task never
+                * runs user code, but cpu_current_top_of_stack should still
+                * be well defined before the first context switch.
+                */
+               .sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
@@ -64,9 +81,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
          */
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
-#ifdef CONFIG_X86_32
        .SYSENTER_stack_canary  = STACK_END_MAGIC,
-#endif
 };
 EXPORT_PER_CPU_SYMBOL(cpu_tss);
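
For reference, the .sp0 poison expression in the hunk above evaluates to an address that can never be a usable stack. A minimal user-space sketch of the arithmetic (not kernel code; BITS_PER_LONG is redefined locally here as an assumption that it matches the width of unsigned long):

#include <stdio.h>

/* Local stand-in for the kernel's BITS_PER_LONG (assumed to match the
 * width of 'unsigned long' on the build host). */
#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))

int main(void)
{
	/* Same expression as the .sp0 initializer in the hunk above. */
	unsigned long sp0_poison = (1UL << (BITS_PER_LONG - 1)) + 1;

	/* Prints 0x8000000000000001 on 64-bit (a non-canonical, misaligned
	 * address) and 0x80000001 on 32-bit; deliberately bogus either way,
	 * so any accidental use of .sp0 is easy to spot. */
	printf("sp0 poison = %#lx\n", sp0_poison);
	return 0;
}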
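
The .sp1 comment describes the 64-bit scheme this change moves toward: the otherwise unused sp1 slot in the hardware TSS doubles as the per-CPU cpu_current_top_of_stack. Below is a stand-alone sketch of that idea; the struct and function names are illustrative only and do not match the kernel's real declarations.

#include <stdint.h>

/* Illustrative stand-ins for the kernel structures; layout is simplified. */
struct demo_hw_tss {
	uint64_t sp0;		/* ring-0 entry stack, poisoned for init */
	uint64_t sp1;		/* repurposed: current task's top of stack */
};

struct demo_tss {
	struct demo_hw_tss x86_tss;
};

/* Per-CPU in the real kernel; one instance is enough for the sketch. */
static struct demo_tss demo_cpu_tss = {
	.x86_tss = {
		.sp0 = (1ULL << 63) + 1,	/* poison, never a valid stack */
		.sp1 = 0,			/* filled in at context switch */
	},
};

/* Hypothetical helper: "this CPU's top of stack" is now a read of sp1. */
static inline uint64_t demo_current_top_of_stack(void)
{
	return demo_cpu_tss.x86_tss.sp1;
}

/* Hypothetical context-switch hook: publish the incoming task's stack top. */
static inline void demo_update_top_of_stack(uint64_t task_top_of_stack)
{
	demo_cpu_tss.x86_tss.sp1 = task_top_of_stack;
}

int main(void)
{
	/* Arbitrary example value standing in for a task's stack top. */
	demo_update_top_of_stack(0xffffc90000004000ULL);
	return demo_current_top_of_stack() != 0xffffc90000004000ULL;
}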