Revert "sched/x86_64: Don't save flags on context switch"
author     Andy Lutomirski <luto@kernel.org>
           Mon, 17 Aug 2015 19:22:50 +0000 (12:22 -0700)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 18 Aug 2015 07:39:26 +0000 (09:39 +0200)
This reverts commit:

  2c7577a75837 ("sched/x86_64: Don't save flags on context switch")

It was a nice speedup.  It's also not quite correct: SYSENTER
enables interrupts too early, so the kernel can be preempted while
user-controlled bits in RFLAGS (such as NT) have not yet been
sanitized, which breaks the "flags are always clean in kernel mode"
assumption the optimization relied on.

We can re-add this optimization once the SYSENTER code is beaten
into shape, which should happen in 4.3 or 4.4.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org # v3.19
Link: http://lkml.kernel.org/r/85f56651f59f76624e80785a8fd3bdfdd089a818.1439838962.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
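
As background for the hunk below: what the revert restores is a plain
pushfq/popfq bracket around the frame-pointer save, so that whatever the
outgoing task last left in RFLAGS travels with that task instead of
leaking into the incoming one.  The following is a minimal user-space
sketch of that idea, not kernel code; it assumes x86-64 with GCC-style
inline asm, and uses DF as the "dirtied" flag purely for illustration.

  /*
   * Illustrative analogue of SAVE_CONTEXT/RESTORE_CONTEXT with the flags
   * save that this revert puts back.  User-space only.
   */
  #include <stdio.h>
  #include <stdint.h>
  #include <inttypes.h>

  int main(void)
  {
          uint64_t saved, dirty, restored;

          /* "SAVE_CONTEXT": stash RFLAGS (pushfq), here into a variable. */
          asm volatile("pushfq ; popq %0" : "=r" (saved) :: "memory");

          /* Dirty a flag the way an outgoing task might (set DF) and
           * read RFLAGS back while it is still set. */
          asm volatile("std ; pushfq ; popq %0"
                       : "=r" (dirty) :: "cc", "memory");

          /* "RESTORE_CONTEXT": popfq puts the saved RFLAGS back; read the
           * result in the same asm block to verify. */
          asm volatile("pushq %1 ; popfq ; pushfq ; popq %0"
                       : "=r" (restored) : "r" (saved) : "cc", "memory");

          printf("saved    %#" PRIx64 "\n", saved);
          printf("dirty    %#" PRIx64 "  (bit 10, DF, set)\n", dirty);
          printf("restored %#" PRIx64 "  (DF clear again)\n", restored);
          return 0;
  }

Built with a stock gcc, the middle value shows bit 10 (DF, 0x400) set
and the final value matches the first: the saved flags, not the dirtied
ones, are what the next stretch of code sees, which is the property the
pushf/popf pair gives switch_to().
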
arch/x86/include/asm/switch_to.h

index 751bf4b7bf114da12231a56f4217c2583ddeafb2..d7f3b3b78ac313ca8a871d3a53b3d118850a657d 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do {                                                                        \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER  \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-         "r12", "r13", "r14", "r15", "flags"
+         "r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary                                                          \
@@ -100,11 +100,7 @@ do {                                                                       \
 #define __switch_canary_iparam
 #endif /* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \