x86/apic: Wrap IPI calls into helper functions
author: Dave Hansen <dave.hansen@linux.intel.com>
Wed, 9 Aug 2023 15:21:45 +0000 (08:21 -0700)
committer: Dave Hansen <dave.hansen@linux.intel.com>
Wed, 9 Aug 2023 19:00:55 +0000 (12:00 -0700)
Move them to one place so the static call conversion gets simpler.

No functional change.

[ dhansen: merge against recent x86/apic changes ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Juergen Gross <jgross@suse.com> # Xen PV (dom0 and unpriv. guest)
13 files changed:
arch/x86/hyperv/hv_spinlock.c
arch/x86/include/asm/apic.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/hw_nmi.c
arch/x86/kernel/apic/ipi.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/mce/inject.c
arch/x86/kernel/irq_work.c
arch/x86/kernel/nmi_selftest.c
arch/x86/kernel/smp.c
arch/x86/kvm/vmx/posted_intr.c
arch/x86/kvm/vmx/vmx.c
arch/x86/platform/uv/uv_nmi.c

index 91cfe698bde0c531a388df586c00f65c35d34aa5..737d6f7a615535374b6f725ca61d1c8d64c1b1dc 100644 (file)
@@ -20,7 +20,7 @@ static bool __initdata hv_pvspin = true;
 
 static void hv_qlock_kick(int cpu)
 {
-       apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
+       __apic_send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
 }
 
 static void hv_qlock_wait(u8 *byte, u8 val)
index 28192a221197a2371e7518ae91de486255eb13cd..62471e20fce09005e384c20676c512659d34ed33 100644 (file)
@@ -401,6 +401,36 @@ static __always_inline void apic_icr_write(u32 low, u32 high)
        apic->icr_write(low, high);
 }
 
+static __always_inline void __apic_send_IPI(int cpu, int vector)
+{
+       apic->send_IPI(cpu, vector);
+}
+
+static __always_inline void __apic_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       apic->send_IPI_mask(mask, vector);
+}
+
+static __always_inline void __apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+{
+       apic->send_IPI_mask_allbutself(mask, vector);
+}
+
+static __always_inline void __apic_send_IPI_allbutself(int vector)
+{
+       apic->send_IPI_allbutself(vector);
+}
+
+static __always_inline void __apic_send_IPI_all(int vector)
+{
+       apic->send_IPI_all(vector);
+}
+
+static __always_inline void __apic_send_IPI_self(int vector)
+{
+       apic->send_IPI_self(vector);
+}
+
 static __always_inline void apic_wait_icr_idle(void)
 {
        if (apic->wait_icr_idle)
index 55f2b96674332e53a418675ca454daa6ff7fb896..760adac3d1a824bf4a87d89c44f142043fd2cb73 100644 (file)
@@ -502,7 +502,7 @@ static int lapic_timer_set_oneshot(struct clock_event_device *evt)
 static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
-       apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+       __apic_send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
index 34a992e275ef42b94e6f0fb057ff160617cd972a..07a3dd14a6da40ad637252ecda349f7f42845260 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 
+#include "local.h"
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
 u64 hw_nmi_get_sample_period(int watchdog_thresh)
 {
@@ -31,7 +33,7 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
 #ifdef arch_trigger_cpumask_backtrace
 static void nmi_raise_cpu_backtrace(cpumask_t *mask)
 {
-       apic->send_IPI_mask(mask, NMI_VECTOR);
+       __apic_send_IPI_mask(mask, NMI_VECTOR);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
index 460df029e511b2a561ce876ee85393fbc6e4933d..b54b2a6a4c322ae01c67163a43086637a5d5ad44 100644 (file)
@@ -54,9 +54,9 @@ void apic_send_IPI_allbutself(unsigned int vector)
                return;
 
        if (static_branch_likely(&apic_use_ipi_shorthand))
-               apic->send_IPI_allbutself(vector);
+               __apic_send_IPI_allbutself(vector);
        else
-               apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+               __apic_send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 /*
@@ -70,12 +70,12 @@ void native_smp_send_reschedule(int cpu)
                WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
                return;
        }
-       apic->send_IPI(cpu, RESCHEDULE_VECTOR);
+       __apic_send_IPI(cpu, RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-       apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
+       __apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 void native_send_call_func_ipi(const struct cpumask *mask)
@@ -87,14 +87,14 @@ void native_send_call_func_ipi(const struct cpumask *mask)
                        goto sendmask;
 
                if (cpumask_test_cpu(cpu, mask))
-                       apic->send_IPI_all(CALL_FUNCTION_VECTOR);
+                       __apic_send_IPI_all(CALL_FUNCTION_VECTOR);
                else if (num_online_cpus() > 1)
-                       apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+                       __apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR);
                return;
        }
 
 sendmask:
-       apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+       __apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 }
 
 #endif /* CONFIG_SMP */
@@ -221,7 +221,7 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
  */
 void default_send_IPI_single(int cpu, int vector)
 {
-       apic->send_IPI_mask(cpumask_of(cpu), vector);
+       __apic_send_IPI_mask(cpumask_of(cpu), vector);
 }
 
 void default_send_IPI_allbutself(int vector)
index 4a262c61c63117328f78b2d5c79fd479bd856f98..319448d87b99a7dbd51795f92b9d21cc70266d78 100644 (file)
@@ -898,7 +898,7 @@ static int apic_retrigger_irq(struct irq_data *irqd)
        unsigned long flags;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       apic->send_IPI(apicd->cpu, apicd->vector);
+       __apic_send_IPI(apicd->cpu, apicd->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
 
        return 1;
index 12cf2e7ca33cc45765c3370bf1ded405d3d23f50..4d8d4bcf915dddeba1747414267be26117b616e2 100644 (file)
@@ -270,8 +270,7 @@ static void __maybe_unused raise_mce(struct mce *m)
                                        mce_irq_ipi, NULL, 0);
                                preempt_enable();
                        } else if (m->inject_flags & MCJ_NMI_BROADCAST)
-                               apic->send_IPI_mask(mce_inject_cpumask,
-                                               NMI_VECTOR);
+                               __apic_send_IPI_mask(mce_inject_cpumask, NMI_VECTOR);
                }
                start = jiffies;
                while (!cpumask_empty(mce_inject_cpumask)) {
index ee53419f825e563d60ab13e964b33a9cbd12fdfa..b0a24deab4a1072771e8596bb0f075cbd44eacd2 100644 (file)
@@ -28,7 +28,7 @@ void arch_irq_work_raise(void)
        if (!arch_irq_work_has_interrupt())
                return;
 
-       apic->send_IPI_self(IRQ_WORK_VECTOR);
+       __apic_send_IPI_self(IRQ_WORK_VECTOR);
        apic_wait_icr_idle();
 }
 #endif
index a1a96df3dff11923d59d17d2dc553eb56b333b09..e93a8545c74d1b017b1ffc9a06e6da8ec71b3275 100644 (file)
@@ -75,7 +75,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
        /* sync above data before sending NMI */
        wmb();
 
-       apic->send_IPI_mask(mask, NMI_VECTOR);
+       __apic_send_IPI_mask(mask, NMI_VECTOR);
 
        /* Don't wait longer than a second */
        timeout = USEC_PER_SEC;
index 6299bf8a4d7fbfc48ce2925f65221fd18c1f3b16..6eb06d001bcc648bc3cdbb71f6e5a9c896d6d1c8 100644 (file)
@@ -237,7 +237,7 @@ static void native_stop_other_cpus(int wait)
                        pr_emerg("Shutting down cpus with NMI\n");
 
                        for_each_cpu(cpu, &cpus_stop_mask)
-                               apic->send_IPI(cpu, NMI_VECTOR);
+                               __apic_send_IPI(cpu, NMI_VECTOR);
                }
                /*
                 * Don't wait longer than 10 ms if the caller didn't
index 94c38bea60e78434c4957cc62db60b8ed13322c2..af662312fd07788ced21ec86a630280199de8e98 100644 (file)
@@ -175,7 +175,7 @@ static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
         * scheduled out).
         */
        if (pi_test_on(&new))
-               apic->send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
+               __apic_send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
 
        local_irq_restore(flags);
 }
index df461f387e20d40dd6d1bee8f37553133574b8e2..b483a8baaacf32056de25f7e471c0078fee6722d 100644 (file)
@@ -4179,7 +4179,7 @@ static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
                 */
 
                if (vcpu != kvm_get_running_vcpu())
-                       apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
+                       __apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
                return;
        }
 #endif
index a60af0230e27b32526ade6f5b3906e81d19327cd..85f9aa02075ab10836d7331816491c7c60053e77 100644 (file)
@@ -601,7 +601,7 @@ static void uv_nmi_nr_cpus_ping(void)
        for_each_cpu(cpu, uv_nmi_cpu_mask)
                uv_cpu_nmi_per(cpu).pinging = 1;
 
-       apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
+       __apic_send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
 /* Clean up flags for CPU's that ignored both NMI and ping */