From 52d743f3b71265e14560a38f4c835d07b9c6fc4c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 10 Feb 2021 00:40:50 +0100
Subject: [PATCH] x86/softirq: Remove indirection in do_softirq_own_stack()

Use the new inline stack switching and remove the old ASM indirect call
implementation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210210002512.972714001@linutronix.de
---
 arch/x86/entry/entry_64.S        | 39 ------------------------
 arch/x86/include/asm/irq_stack.h | 52 ++++++++++----------------------
 arch/x86/kernel/irq_64.c         |  2 +-
 3 files changed, 17 insertions(+), 76 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f446e9048d07..bd52f675d11d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -756,45 +756,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
 SYM_CODE_END(.Lbad_gs)
 	.previous
 
-/*
- * rdi: New stack pointer points to the top word of the stack
- * rsi: Function pointer
- * rdx: Function argument (can be NULL if none)
- */
-SYM_FUNC_START(asm_call_on_stack)
-	/*
-	 * Save the frame pointer unconditionally. This allows the ORC
-	 * unwinder to handle the stack switch.
-	 */
-	pushq	%rbp
-	mov	%rsp, %rbp
-
-	/*
-	 * The unwinder relies on the word at the top of the new stack
-	 * page linking back to the previous RSP.
-	 */
-	mov	%rsp, (%rdi)
-	mov	%rdi, %rsp
-	/* Move the argument to the right place */
-	mov	%rdx, %rdi
-
-1:
-	.pushsection .discard.instr_begin
-	.long 1b - .
-	.popsection
-
-	CALL_NOSPEC	rsi
-
-2:
-	.pushsection .discard.instr_end
-	.long 2b - .
-	.popsection
-
-	/* Restore the previous stack pointer from RBP. */
-	leaveq
-	ret
-SYM_FUNC_END(asm_call_on_stack)
-
 #ifdef CONFIG_XEN_PV
 /*
  * A note on the "critical region" in our callback handler.
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index dabc0cf60df5..fa444c27772a 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -185,20 +185,23 @@
 			      IRQ_CONSTRAINTS, regs, vector);		\
 }
 
-static __always_inline bool irqstack_active(void)
-{
-	return __this_cpu_read(hardirq_stack_inuse);
-}
-
-void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+#define ASM_CALL_SOFTIRQ						\
+	"call %P[__func]				\n"
 
-static __always_inline void __run_on_irqstack(void (*func)(void))
-{
-	void *tos = __this_cpu_read(hardirq_stack_ptr);
-
-	__this_cpu_write(hardirq_stack_inuse, true);
-	asm_call_on_stack(tos, func, NULL);
-	__this_cpu_write(hardirq_stack_inuse, false);
+/*
+ * Macro to invoke __do_softirq on the irq stack. Contrary to the above
+ * the only check which is necessary is whether the interrupt stack is
+ * in use already.
+ */
+#define run_softirq_on_irqstack_cond()					\
+{									\
+	if (__this_cpu_read(hardirq_stack_inuse)) {			\
+		__do_softirq();						\
+	} else {							\
+		__this_cpu_write(hardirq_stack_inuse, true);		\
+		call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ);	\
+		__this_cpu_write(hardirq_stack_inuse, false);		\
+	}								\
 }
 
 #else /* CONFIG_X86_64 */
@@ -219,29 +222,6 @@ static __always_inline void __run_on_irqstack(void (*func)(void))
 	irq_exit_rcu();							\
 }
 
-static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void (*func)(void)) { }
 #endif /* !CONFIG_X86_64 */
 
-static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
-{
-	if (IS_ENABLED(CONFIG_X86_32))
-		return false;
-	if (!regs)
-		return !irqstack_active();
-	return !user_mode(regs) && !irqstack_active();
-}
-
-
-static __always_inline void run_on_irqstack_cond(void (*func)(void),
-						 struct pt_regs *regs)
-{
-	lockdep_assert_irqs_disabled();
-
-	if (irq_needs_irq_stack(regs))
-		__run_on_irqstack(func);
-	else
-		func();
-}
-
 #endif
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 7103f9889930..8d9f9a1b49e5 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -76,5 +76,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 
 void do_softirq_own_stack(void)
 {
-	run_on_irqstack_cond(__do_softirq, NULL);
+	run_softirq_on_irqstack_cond();
 }
-- 
2.25.1
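
The key behavioural point of the patch is that run_softirq_on_irqstack_cond()
replaces the old irq_needs_irq_stack()/pt_regs logic with a single check: is
the hardirq stack already in use? What follows is a minimal user-space sketch
of that branch structure, for stepping through the control flow outside the
kernel. The names stack_inuse, softirq_work() and call_on_alt_stack() are
invented stand-ins for this illustration only; the real call_on_irqstack()
performs the actual RSP switch in inline assembly (via the ASM_CALL_SOFTIRQ
template above), which a portable sketch cannot reproduce.

#include <stdbool.h>
#include <stdio.h>

/* Models the per-CPU hardirq_stack_inuse flag (single-threaded here). */
static bool stack_inuse;

/* Stand-in for __do_softirq(); just reports how it was reached. */
static void softirq_work(void)
{
	printf("softirq work runs, stack_inuse=%d\n", stack_inuse);
}

/*
 * Stand-in for call_on_irqstack(): the real macro switches the stack
 * pointer to the per-CPU irq stack before making this call.
 */
static void call_on_alt_stack(void (*func)(void))
{
	func();
}

/* Mirrors the branch structure of run_softirq_on_irqstack_cond(). */
static void run_softirq_cond(void)
{
	if (stack_inuse) {
		/* Already on the irq stack: call directly, no switch. */
		softirq_work();
	} else {
		/* Mark the stack busy for the duration of the call. */
		stack_inuse = true;
		call_on_alt_stack(softirq_work);
		stack_inuse = false;
	}
}

int main(void)
{
	run_softirq_cond();	/* takes the stack-switch branch */

	stack_inuse = true;	/* simulate already running on the irq stack */
	run_softirq_cond();	/* takes the direct-call branch */
	return 0;
}

Note that the flag is written around the switched call rather than inside it,
so any nested invocation observes the stack as busy and runs in place instead
of switching again.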