smp: Move __smp_call_function_single() below its safe version
[linux-block.git] / kernel / smp.c
index ffee35bef1793107c33c31d7845e4b9c5e9e43d9..fa04ab938e5212e5c1f28e9309e0d1165067cff8 100644 (file)
@@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd)
        csd->flags &= ~CSD_FLAG_LOCK;
 }
 
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
+
 /*
  * Insert a previously allocated call_single_data element
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static int generic_exec_single(int cpu, struct call_single_data *csd,
+                              smp_call_func_t func, void *info, int wait)
 {
+       struct call_single_data csd_stack = { .flags = 0 };
+       unsigned long flags;
+
+
+       if (cpu == smp_processor_id()) {
+               local_irq_save(flags);
+               func(info);
+               local_irq_restore(flags);
+               return 0;
+       }
+
+
+       if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
+               return -ENXIO;
+
+
+       if (!csd) {
+               csd = &csd_stack;
+               if (!wait)
+                       csd = &__get_cpu_var(csd_data);
+       }
+
+       csd_lock(csd);
+
+       csd->func = func;
+       csd->info = info;
+
        if (wait)
                csd->flags |= CSD_FLAG_WAIT;
 
@@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 
        if (wait)
                csd_lock_wait(csd);
+
+       return 0;
 }
 
 /*
@@ -151,7 +183,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-       struct llist_node *entry, *next;
+       struct llist_node *entry;
+       struct call_single_data *csd, *csd_next;
 
        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -161,21 +194,12 @@ void generic_smp_call_function_single_interrupt(void)
        entry = llist_del_all(&__get_cpu_var(call_single_queue));
        entry = llist_reverse_order(entry);
 
-       while (entry) {
-               struct call_single_data *csd;
-
-               next = entry->next;
-
-               csd = llist_entry(entry, struct call_single_data, llist);
+       llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                csd->func(csd->info);
                csd_unlock(csd);
-
-               entry = next;
        }
 }
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
-
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
@@ -187,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
 {
-       struct call_single_data d = {
-               .flags = 0,
-       };
-       unsigned long flags;
        int this_cpu;
-       int err = 0;
+       int err;
 
        /*
         * prevent preemption and reschedule on another processor,
@@ -209,32 +229,45 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);
 
-       if (cpu == this_cpu) {
-               local_irq_save(flags);
-               func(info);
-               local_irq_restore(flags);
-       } else {
-               if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-                       struct call_single_data *csd = &d;
+       err = generic_exec_single(cpu, NULL, func, info, wait);
 
-                       if (!wait)
-                               csd = &__get_cpu_var(csd_data);
+       put_cpu();
 
-                       csd_lock(csd);
+       return err;
+}
+EXPORT_SYMBOL(smp_call_function_single);
 
-                       csd->func = func;
-                       csd->info = info;
-                       generic_exec_single(cpu, csd, wait);
-               } else {
-                       err = -ENXIO;   /* CPU not online */
-               }
-       }
+/**
+ * __smp_call_function_single(): Run a function on a specific CPU
+ * @cpu: The CPU to run on.
+ * @csd: Pre-allocated and setup data structure
+ * @wait: If true, wait until function has completed on specified CPU.
+ *
+ * Like smp_call_function_single(), but allow caller to pass in a
+ * pre-allocated data structure. Useful for embedding @csd inside
+ * other structures, for instance.
+ */
+int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait)
+{
+       int err = 0;
+       int this_cpu;
+
+       this_cpu = get_cpu();
+       /*
+        * Can deadlock when called with interrupts disabled.
+        * We allow cpu's that are not yet online though, as no one else can
+        * send smp call function interrupt to this cpu and as such deadlocks
+        * can't happen.
+        */
+       WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled()
+                    && !oops_in_progress);
 
+       err = generic_exec_single(cpu, csd, csd->func, csd->info, wait);
        put_cpu();
 
        return err;
 }
-EXPORT_SYMBOL(smp_call_function_single);
+EXPORT_SYMBOL_GPL(__smp_call_function_single);
 
 /*
  * smp_call_function_any - Run a function on any of the given cpus
@@ -279,44 +312,6 @@ call:
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * __smp_call_function_single(): Run a function on a specific CPU
- * @cpu: The CPU to run on.
- * @data: Pre-allocated and setup data structure
- * @wait: If true, wait until function has completed on specified CPU.
- *
- * Like smp_call_function_single(), but allow caller to pass in a
- * pre-allocated data structure. Useful for embedding @data inside
- * other structures, for instance.
- */
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-                               int wait)
-{
-       unsigned int this_cpu;
-       unsigned long flags;
-
-       this_cpu = get_cpu();
-       /*
-        * Can deadlock when called with interrupts disabled.
-        * We allow cpu's that are not yet online though, as no one else can
-        * send smp call function interrupt to this cpu and as such deadlocks
-        * can't happen.
-        */
-       WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
-                    && !oops_in_progress);
-
-       if (cpu == this_cpu) {
-               local_irq_save(flags);
-               csd->func(csd->info);
-               local_irq_restore(flags);
-       } else {
-               csd_lock(csd);
-               generic_exec_single(cpu, csd, wait);
-       }
-       put_cpu();
-}
-EXPORT_SYMBOL_GPL(__smp_call_function_single);
-
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).