x86/entry: Convert device interrupts to inline stack switching
author: Thomas Gleixner <tglx@linutronix.de>
Tue, 9 Feb 2021 23:40:48 +0000 (00:40 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Wed, 10 Feb 2021 22:34:15 +0000 (23:34 +0100)
Convert device interrupts to inline stack switching by replacing the
existing macro implementation with the new inline version. Tweak the
function signature of the actual handler function to have the vector
argument as u32. That allows the inline macro to avoid extra intermediates
and lets the compiler be smarter about the whole thing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210210002512.769728139@linutronix.de
arch/x86/entry/entry_64.S
arch/x86/include/asm/idtentry.h
arch/x86/include/asm/irq_stack.h
arch/x86/kernel/irq.c

index 68643d324f4a0de775a7be123018e92bde41a89c..f446e9048d07b52013ef8dbd1acf79b3699cf375 100644 (file)
@@ -762,7 +762,6 @@ SYM_CODE_END(.Lbad_gs)
  * rdx: Function argument (can be NULL if none)
  */
 SYM_FUNC_START(asm_call_on_stack)
-SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
        /*
         * Save the frame pointer unconditionally. This allows the ORC
         * unwinder to handle the stack switch.
index 712b3c8d45542f97f43c1d3294260b872fddf78e..f294637ed340f9aacc770d3611165bb31435024f 100644 (file)
@@ -187,23 +187,22 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
  * has to be done in the function body if necessary.
  */
 #define DEFINE_IDTENTRY_IRQ(func)                                      \
-static __always_inline void __##func(struct pt_regs *regs, u8 vector); \
+static void __##func(struct pt_regs *regs, u32 vector);                        \
                                                                        \
 __visible noinstr void func(struct pt_regs *regs,                      \
                            unsigned long error_code)                   \
 {                                                                      \
        irqentry_state_t state = irqentry_enter(regs);                  \
+       u32 vector = (u32)(u8)error_code;                               \
                                                                        \
        instrumentation_begin();                                        \
-       irq_enter_rcu();                                                \
        kvm_set_cpu_l1tf_flush_l1d();                                   \
-       __##func (regs, (u8)error_code);                                \
-       irq_exit_rcu();                                                 \
+       run_irq_on_irqstack_cond(__##func, regs, vector);               \
        instrumentation_end();                                          \
        irqentry_exit(regs, state);                                     \
 }                                                                      \
                                                                        \
-static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+static noinline void __##func(struct pt_regs *regs, u32 vector)
 
 /**
  * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
index 05c37e7b3bcca0cdf35e80af85f2b5c1e10e173b..dabc0cf60df59dd2f7747045a90f1c7cbd94d66d 100644 (file)
                              SYSVEC_CONSTRAINTS, regs);                \
 }
 
+/*
+ * As in ASM_CALL_SYSVEC above the clobbers force the compiler to store
+ * @regs and @vector in callee saved registers.
+ */
+#define ASM_CALL_IRQ                                                   \
+       "call irq_enter_rcu                             \n"             \
+       "movq   %[arg1], %%rdi                          \n"             \
+       "movl   %[arg2], %%esi                          \n"             \
+       "call %P[__func]                                \n"             \
+       "call irq_exit_rcu                              \n"
+
+#define IRQ_CONSTRAINTS        , [arg1] "r" (regs), [arg2] "r" (vector)
+
+#define run_irq_on_irqstack_cond(func, regs, vector)                   \
+{                                                                      \
+       assert_function_type(func, void (*)(struct pt_regs *, u32));    \
+       assert_arg_type(regs, struct pt_regs *);                        \
+       assert_arg_type(vector, u32);                                   \
+                                                                       \
+       call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,                 \
+                             IRQ_CONSTRAINTS, regs, vector);           \
+}
+
 static __always_inline bool irqstack_active(void)
 {
        return __this_cpu_read(hardirq_stack_inuse);
 }
 
 void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
-void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
-                          struct irq_desc *desc);
 
 static __always_inline void __run_on_irqstack(void (*func)(void))
 {
@@ -180,17 +201,6 @@ static __always_inline void __run_on_irqstack(void (*func)(void))
        __this_cpu_write(hardirq_stack_inuse, false);
 }
 
-static __always_inline void
-__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
-                     struct irq_desc *desc)
-{
-       void *tos = __this_cpu_read(hardirq_stack_ptr);
-
-       __this_cpu_write(hardirq_stack_inuse, true);
-       asm_call_irq_on_stack(tos, func, desc);
-       __this_cpu_write(hardirq_stack_inuse, false);
-}
-
 #else /* CONFIG_X86_64 */
 
 /* System vector handlers always run on the stack they interrupted. */
@@ -201,10 +211,16 @@ __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
        irq_exit_rcu();                                                 \
 }
 
+/* Switches to the irq stack within func() */
+#define run_irq_on_irqstack_cond(func, regs, vector)                   \
+{                                                                      \
+       irq_enter_rcu();                                                \
+       func(regs, vector);                                             \
+       irq_exit_rcu();                                                 \
+}
+
 static inline bool irqstack_active(void) { return false; }
 static inline void __run_on_irqstack(void (*func)(void)) { }
-static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
-                                        struct irq_desc *desc) { }
 #endif /* !CONFIG_X86_64 */
 
 static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
@@ -228,16 +244,4 @@ static __always_inline void run_on_irqstack_cond(void (*func)(void),
                func();
 }
 
-static __always_inline void
-run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
-                        struct pt_regs *regs)
-{
-       lockdep_assert_irqs_disabled();
-
-       if (irq_needs_irq_stack(regs))
-               __run_irq_on_irqstack(func, desc);
-       else
-               func(desc);
-}
-
 #endif
index c5dd50369e2f3394bc483b8744ace94eaaea552b..1507b983cd8d694f22e8d90fcb187858e72c376e 100644 (file)
@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
                                       struct pt_regs *regs)
 {
        if (IS_ENABLED(CONFIG_X86_64))
-               run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
+               generic_handle_irq_desc(desc);
        else
                __handle_irq(desc, regs);
 }