workqueue: move try_to_grab_pending() upwards
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 692d97628a106360683dfef46797952cdf1861e1..0f50f4078e367a177675d7fc921a44f33e0aece4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -537,9 +537,10 @@ static int work_next_color(int color)
  * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
  * cleared and the work data contains the cpu number it was last on.
  *
- * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
- * cwq, cpu or clear work->data.  These functions should only be
- * called while the work is owned - ie. while the PENDING bit is set.
+ * set_work_cwq(), set_work_cpu_and_clear_pending() and clear_work_data()
+ * can be used to set the cwq, cpu or clear work->data.  These functions
+ * should only be called while the work is owned - i.e. while the PENDING
+ * bit is set.
  *
  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
  * corresponding to a work.  gcwq is available once the work has been
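The ownership rule above (only the PENDING owner may touch work->data) is easiest to see from the caller side. A minimal sketch, mirroring what queue_work_on()/__queue_work() do later in this patch; the helper name is hypothetical:

static void sketch_claim_and_queue(int cpu, struct workqueue_struct *wq,
				   struct work_struct *work)
{
	unsigned long flags;

	/* winning PENDING makes this task the sole owner of work->data */
	local_irq_save(flags);
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		__queue_work(cpu, wq, work);	/* sets the cwq via set_work_cwq() */
	local_irq_restore(flags);
}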
@@ -561,9 +562,10 @@ static void set_work_cwq(struct work_struct *work,
                      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 }
 
-static void set_work_cpu(struct work_struct *work, unsigned int cpu)
+static void set_work_cpu_and_clear_pending(struct work_struct *work,
+                                          unsigned int cpu)
 {
-       set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
+       set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, 0);
 }
 
 static void clear_work_data(struct work_struct *work)
@@ -901,6 +903,149 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
                                            work);
 }
 
+/**
+ * move_linked_works - move linked works to a list
+ * @work: start of series of works to be scheduled
+ * @head: target list to append @work to
+ * @nextp: out parameter for nested worklist walking
+ *
+ * Schedule linked works starting from @work to @head.  Work series to
+ * be scheduled starts at @work and includes any consecutive work with
+ * WORK_STRUCT_LINKED set in its predecessor.
+ *
+ * If @nextp is not NULL, it's updated to point to the next work of
+ * the last scheduled work.  This allows move_linked_works() to be
+ * nested inside outer list_for_each_entry_safe().
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void move_linked_works(struct work_struct *work, struct list_head *head,
+                             struct work_struct **nextp)
+{
+       struct work_struct *n;
+
+       /*
+        * A linked worklist always ends before the end of the list, so
+        * use NULL as the list head.
+        */
+       list_for_each_entry_safe_from(work, n, NULL, entry) {
+               list_move_tail(&work->entry, head);
+               if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
+                       break;
+       }
+
+       /*
+        * If we're already inside safe list traversal and have moved
+        * multiple works to the scheduled queue, the next position
+        * needs to be updated.
+        */
+       if (nextp)
+               *nextp = n;
+}
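As the kernel-doc notes, @nextp lets move_linked_works() nest inside an outer safe traversal. A hypothetical caller sketch (not a function in this file, and assuming the worker_pool type used elsewhere here) that drains a pool's worklist series by series:

static void sketch_drain_worklist(struct worker_pool *pool,
				  struct list_head *dst)
{
	struct work_struct *work, *n;

	list_for_each_entry_safe(work, n, &pool->worklist, entry)
		/*
		 * Moves @work plus any WORK_STRUCT_LINKED followers and
		 * advances @n past the whole series for the outer loop.
		 */
		move_linked_works(work, dst, &n);
}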
+
+static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+{
+       struct work_struct *work = list_first_entry(&cwq->delayed_works,
+                                                   struct work_struct, entry);
+
+       trace_workqueue_activate_work(work);
+       move_linked_works(work, &cwq->pool->worklist, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
+       cwq->nr_active++;
+}
+
+/**
+ * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
+ * @cwq: cwq of interest
+ * @color: color of work which left the queue
+ * @delayed: for a delayed work
+ *
+ * A work has either completed or been removed from the pending queue;
+ * decrement nr_in_flight of its cwq and handle workqueue flushing.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ */
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                bool delayed)
+{
+       /* ignore uncolored works */
+       if (color == WORK_NO_COLOR)
+               return;
+
+       cwq->nr_in_flight[color]--;
+
+       if (!delayed) {
+               cwq->nr_active--;
+               if (!list_empty(&cwq->delayed_works)) {
+                       /* one down, submit a delayed one */
+                       if (cwq->nr_active < cwq->max_active)
+                               cwq_activate_first_delayed(cwq);
+               }
+       }
+
+       /* is flush in progress and are we at the flushing tip? */
+       if (likely(cwq->flush_color != color))
+               return;
+
+       /* are there still in-flight works? */
+       if (cwq->nr_in_flight[color])
+               return;
+
+       /* this cwq is done, clear flush_color */
+       cwq->flush_color = -1;
+
+       /*
+        * If this was the last cwq, wake up the first flusher.  It
+        * will handle the rest.
+        */
+       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
+               complete(&cwq->wq->first_flusher->done);
+}
+
+/*
+ * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
+ * so this work can't be re-armed in any way.
+ */
+static int try_to_grab_pending(struct work_struct *work)
+{
+       struct global_cwq *gcwq;
+       int ret = -1;
+
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
+               return 0;
+
+       /*
+        * The queueing is in progress, or it is already queued. Try to
+        * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+        */
+       gcwq = get_work_gcwq(work);
+       if (!gcwq)
+               return ret;
+
+       spin_lock_irq(&gcwq->lock);
+       if (!list_empty(&work->entry)) {
+               /*
+                * This work is queued, but perhaps we locked the wrong gcwq.
+                * In that case we must see the new value after rmb(), see
+                * insert_work()->wmb().
+                */
+               smp_rmb();
+               if (gcwq == get_work_gcwq(work)) {
+                       debug_work_deactivate(work);
+                       list_del_init(&work->entry);
+                       cwq_dec_nr_in_flight(get_work_cwq(work),
+                               get_work_color(work),
+                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
+                       ret = 1;
+               }
+       }
+       spin_unlock_irq(&gcwq->lock);
+
+       return ret;
+}
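Only a return of 0 or 1 hands PENDING ownership to the caller; -1 means a concurrent queueing attempt is still in flight and the caller has to retry. A hedged sketch of that retry loop, modelled on the cancellation paths that call this function; the helper name is hypothetical:

static bool sketch_grab_pending(struct work_struct *work)
{
	int ret;

	do {
		ret = try_to_grab_pending(work);
		if (ret < 0)
			cpu_relax();	/* queueing in flight, try again */
	} while (ret < 0);

	/* 1: stolen off the worklist, 0: PENDING was not set to begin with */
	return ret == 1;
}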
+
 /**
  * insert_work - insert a work into gcwq
  * @cwq: cwq @work belongs to
@@ -981,7 +1126,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
        unsigned int work_flags;
-       unsigned long flags;
+
+       /*
+        * While a work item is PENDING && off queue, a task trying to
+        * steal the PENDING will busy-loop waiting for it to either get
+        * queued or lose PENDING.  Grabbing PENDING and queueing should
+        * happen with IRQ disabled.
+        */
+       WARN_ON_ONCE(!irqs_disabled());
 
        debug_work_activate(work);
 
@@ -994,7 +1146,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
 
-               if (unlikely(cpu == WORK_CPU_UNBOUND))
+               if (cpu == WORK_CPU_UNBOUND)
                        cpu = raw_smp_processor_id();
 
                /*
@@ -1008,7 +1160,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
                        struct worker *worker;
 
-                       spin_lock_irqsave(&last_gcwq->lock, flags);
+                       spin_lock(&last_gcwq->lock);
 
                        worker = find_worker_executing_work(last_gcwq, work);
 
@@ -1016,14 +1168,15 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                                gcwq = last_gcwq;
                        else {
                                /* meh... not running there, queue here */
-                               spin_unlock_irqrestore(&last_gcwq->lock, flags);
-                               spin_lock_irqsave(&gcwq->lock, flags);
+                               spin_unlock(&last_gcwq->lock);
+                               spin_lock(&gcwq->lock);
                        }
-               } else
-                       spin_lock_irqsave(&gcwq->lock, flags);
+               } else {
+                       spin_lock(&gcwq->lock);
+               }
        } else {
                gcwq = get_gcwq(WORK_CPU_UNBOUND);
-               spin_lock_irqsave(&gcwq->lock, flags);
+               spin_lock(&gcwq->lock);
        }
 
        /* gcwq determined, get cwq and queue */
@@ -1031,7 +1184,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        trace_workqueue_queue_work(cpu, cwq, work);
 
        if (WARN_ON(!list_empty(&work->entry))) {
-               spin_unlock_irqrestore(&gcwq->lock, flags);
+               spin_unlock(&gcwq->lock);
                return;
        }
 
@@ -1049,79 +1202,64 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        insert_work(cwq, work, worklist, work_flags);
 
-       spin_unlock_irqrestore(&gcwq->lock, flags);
+       spin_unlock(&gcwq->lock);
 }
 
-/**
- * queue_work - queue work on a workqueue
- * @wq: workqueue to use
- * @work: work to queue
- *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
- *
- * We queue the work to the CPU on which it was submitted, but if the CPU dies
- * it can be processed by another CPU.
- */
-int queue_work(struct workqueue_struct *wq, struct work_struct *work)
-{
-       int ret;
-
-       ret = queue_work_on(get_cpu(), wq, work);
-       put_cpu();
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(queue_work);
-
 /**
  * queue_work_on - queue work on specific cpu
  * @cpu: CPU number to execute work on
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns %false if @work was already on a queue, %true otherwise.
  *
  * We queue the work to a specific CPU, the caller must ensure it
  * can't go away.
  */
-int
-queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+bool queue_work_on(int cpu, struct workqueue_struct *wq,
+                  struct work_struct *work)
 {
-       int ret = 0;
+       bool ret = false;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                __queue_work(cpu, wq, work);
-               ret = 1;
+               ret = true;
        }
+
+       local_irq_restore(flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work_on);
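With the bool return, callers can tell "newly queued" from "already pending" directly. A minimal module-side usage sketch; everything named example_* is hypothetical, while DECLARE_WORK() and system_wq are the stock kernel APIs:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	pr_info("example work ran\n");
}

static DECLARE_WORK(example_work, example_fn);

static void example_kick(int cpu)
{
	/* true: we queued it, false: it was already PENDING */
	if (!queue_work_on(cpu, system_wq, &example_work))
		pr_debug("example_work already pending\n");
}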
 
-static void delayed_work_timer_fn(unsigned long __data)
-{
-       struct delayed_work *dwork = (struct delayed_work *)__data;
-       struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
-
-       __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
-}
-
 /**
- * queue_delayed_work - queue work on a workqueue after delay
+ * queue_work - queue work on a workqueue
  * @wq: workqueue to use
- * @dwork: delayable work to queue
- * @delay: number of jiffies to wait before queueing
+ * @work: work to queue
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
-int queue_delayed_work(struct workqueue_struct *wq,
-                       struct delayed_work *dwork, unsigned long delay)
+bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-       if (delay == 0)
-               return queue_work(wq, &dwork->work);
+       return queue_work_on(WORK_CPU_UNBOUND, wq, work);
+}
+EXPORT_SYMBOL_GPL(queue_work);
+
+void delayed_work_timer_fn(unsigned long __data)
+{
+       struct delayed_work *dwork = (struct delayed_work *)__data;
+       struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
-       return queue_delayed_work_on(-1, wq, dwork, delay);
+       local_irq_disable();
+       __queue_work(WORK_CPU_UNBOUND, cwq->wq, &dwork->work);
+       local_irq_enable();
 }
-EXPORT_SYMBOL_GPL(queue_delayed_work);
+EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
 
 /**
  * queue_delayed_work_on - queue work on specific CPU after delay
@@ -1130,18 +1268,29 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns %false if @work was already on a queue, %true otherwise.  If
+ * @delay is zero and @dwork is idle, it will be scheduled for immediate
+ * execution.
  */
-int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-                       struct delayed_work *dwork, unsigned long delay)
+bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+                          struct delayed_work *dwork, unsigned long delay)
 {
-       int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
+       bool ret = false;
+       unsigned long flags;
+
+       if (!delay)
+               return queue_work_on(cpu, wq, &dwork->work);
+
+       /* read the comment in __queue_work() */
+       local_irq_save(flags);
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                unsigned int lcpu;
 
+               WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
+                            timer->data != (unsigned long)dwork);
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
@@ -1165,19 +1314,34 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                set_work_cwq(work, get_cwq(lcpu, wq), 0);
 
                timer->expires = jiffies + delay;
-               timer->data = (unsigned long)dwork;
-               timer->function = delayed_work_timer_fn;
 
-               if (unlikely(cpu >= 0))
+               if (unlikely(cpu != WORK_CPU_UNBOUND))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
-               ret = 1;
+               ret = true;
        }
+
+       local_irq_restore(flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
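The WARN_ON_ONCE above assumes the delayed_work's timer was set up to call delayed_work_timer_fn, which is what INIT_DELAYED_WORK()/DECLARE_DELAYED_WORK() are expected to do now that the timer function is exported. A hypothetical caller sketch under that assumption; the example_* names are made up:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work example_dwork;

static void example_dwork_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* re-arm ourselves roughly once a second */
	queue_delayed_work(system_wq, dwork, HZ);
}

static void example_arm(void)
{
	/* assumed to wire dwork->timer to delayed_work_timer_fn, per the check above */
	INIT_DELAYED_WORK(&example_dwork, example_dwork_fn);

	/* %false if already pending; a zero delay would queue immediately */
	if (!queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq,
				   &example_dwork, HZ / 10))
		pr_debug("example_dwork already pending\n");
}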
 
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @dwork: delayable work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
+ */
+bool queue_delayed_work(struct workqueue_struct *wq,
+                       struct delayed_work *dwork, unsigned long delay)
+{
+       return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(queue_delayed_work);
+
 /**
  * worker_enter_idle - enter idle state
  * @worker: worker which is entering idle state
@@ -1810,107 +1974,6 @@ static bool manage_workers(struct worker *worker)
        return ret;
 }
 
-/**
- * move_linked_works - move linked works to a list
- * @work: start of series of works to be scheduled
- * @head: target list to append @work to
- * @nextp: out paramter for nested worklist walking
- *
- * Schedule linked works starting from @work to @head.  Work series to
- * be scheduled starts at @work and includes any consecutive work with
- * WORK_STRUCT_LINKED set in its predecessor.
- *
- * If @nextp is not NULL, it's updated to point to the next work of
- * the last scheduled work.  This allows move_linked_works() to be
- * nested inside outer list_for_each_entry_safe().
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void move_linked_works(struct work_struct *work, struct list_head *head,
-                             struct work_struct **nextp)
-{
-       struct work_struct *n;
-
-       /*
-        * Linked worklist will always end before the end of the list,
-        * use NULL for list head.
-        */
-       list_for_each_entry_safe_from(work, n, NULL, entry) {
-               list_move_tail(&work->entry, head);
-               if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
-                       break;
-       }
-
-       /*
-        * If we're already inside safe list traversal and have moved
-        * multiple works to the scheduled queue, the next position
-        * needs to be updated.
-        */
-       if (nextp)
-               *nextp = n;
-}
-
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
-{
-       struct work_struct *work = list_first_entry(&cwq->delayed_works,
-                                                   struct work_struct, entry);
-
-       trace_workqueue_activate_work(work);
-       move_linked_works(work, &cwq->pool->worklist, NULL);
-       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-       cwq->nr_active++;
-}
-
-/**
- * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
- * @cwq: cwq of interest
- * @color: color of work which left the queue
- * @delayed: for a delayed work
- *
- * A work either has completed or is removed from pending queue,
- * decrement nr_in_flight of its cwq and handle workqueue flushing.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
-                                bool delayed)
-{
-       /* ignore uncolored works */
-       if (color == WORK_NO_COLOR)
-               return;
-
-       cwq->nr_in_flight[color]--;
-
-       if (!delayed) {
-               cwq->nr_active--;
-               if (!list_empty(&cwq->delayed_works)) {
-                       /* one down, submit a delayed one */
-                       if (cwq->nr_active < cwq->max_active)
-                               cwq_activate_first_delayed(cwq);
-               }
-       }
-
-       /* is flush in progress and are we at the flushing tip? */
-       if (likely(cwq->flush_color != color))
-               return;
-
-       /* are there still in-flight works? */
-       if (cwq->nr_in_flight[color])
-               return;
-
-       /* this cwq is done, clear flush_color */
-       cwq->flush_color = -1;
-
-       /*
-        * If this was the last cwq, wake up the first flusher.  It
-        * will handle the rest.
-        */
-       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
-               complete(&cwq->wq->first_flusher->done);
-}
-
 /**
  * process_one_work - process single work
  * @worker: self
@@ -1970,15 +2033,13 @@ __acquires(&gcwq->lock)
                return;
        }
 
-       /* claim and process */
+       /* claim and dequeue */
        debug_work_deactivate(work);
        hlist_add_head(&worker->hentry, bwh);
        worker->current_work = work;
        worker->current_cwq = cwq;
        work_color = get_work_color(work);
 
-       /* record the current cpu number in the work data and dequeue */
-       set_work_cpu(work, gcwq->cpu);
        list_del_init(&work->entry);
 
        /*
@@ -1995,9 +2056,19 @@ __acquires(&gcwq->lock)
        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
                wake_up_worker(pool);
 
+       /*
+        * Record the last CPU and clear PENDING.  The following wmb is
+        * paired with the implied mb in test_and_set_bit(PENDING) and
+        * ensures all updates to @work made here are visible to and
+        * precede any updates by the next PENDING owner.  Also, clear
+        * PENDING inside @gcwq->lock so that PENDING and queued state
+        * changes happen together while IRQ is disabled.
+        */
+       smp_wmb();
+       set_work_cpu_and_clear_pending(work, gcwq->cpu);
+
        spin_unlock_irq(&gcwq->lock);
 
-       work_clear_pending(work);
        lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
@@ -2738,48 +2809,6 @@ bool flush_work_sync(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work_sync);
 
-/*
- * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
- * so this work can't be re-armed in any way.
- */
-static int try_to_grab_pending(struct work_struct *work)
-{
-       struct global_cwq *gcwq;
-       int ret = -1;
-
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
-               return 0;
-
-       /*
-        * The queueing is in progress, or it is already queued. Try to
-        * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-        */
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
-               return ret;
-
-       spin_lock_irq(&gcwq->lock);
-       if (!list_empty(&work->entry)) {
-               /*
-                * This work is queued, but perhaps we locked the wrong gcwq.
-                * In that case we must see the new value after rmb(), see
-                * insert_work()->wmb().
-                */
-               smp_rmb();
-               if (gcwq == get_work_gcwq(work)) {
-                       debug_work_deactivate(work);
-                       list_del_init(&work->entry);
-                       cwq_dec_nr_in_flight(get_work_cwq(work),
-                               get_work_color(work),
-                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
-                       ret = 1;
-               }
-       }
-       spin_unlock_irq(&gcwq->lock);
-
-       return ret;
-}
-
 static bool __cancel_work_timer(struct work_struct *work,
                                struct timer_list* timer)
 {
@@ -2834,9 +2863,11 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  */
 bool flush_delayed_work(struct delayed_work *dwork)
 {
+       local_irq_disable();
        if (del_timer_sync(&dwork->timer))
-               __queue_work(raw_smp_processor_id(),
+               __queue_work(WORK_CPU_UNBOUND,
                             get_work_cwq(&dwork->work)->wq, &dwork->work);
+       local_irq_enable();
        return flush_work(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work);
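flush_delayed_work() now fires a still-pending timer immediately (queueing to WORK_CPU_UNBOUND with IRQs disabled) before flushing the work item. A short hypothetical teardown sketch:

static void example_quiesce(struct delayed_work *dwork)
{
	/* true if we had to wait for the work item to finish */
	if (flush_delayed_work(dwork))
		pr_debug("delayed work had not finished yet\n");
}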
@@ -2855,9 +2886,11 @@ EXPORT_SYMBOL(flush_delayed_work);
  */
 bool flush_delayed_work_sync(struct delayed_work *dwork)
 {
+       local_irq_disable();
        if (del_timer_sync(&dwork->timer))
-               __queue_work(raw_smp_processor_id(),
+               __queue_work(WORK_CPU_UNBOUND,
                             get_work_cwq(&dwork->work)->wq, &dwork->work);
+       local_irq_enable();
        return flush_work_sync(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work_sync);
@@ -2878,49 +2911,34 @@ bool cancel_delayed_work_sync(struct delayed_work *dwork)
 EXPORT_SYMBOL(cancel_delayed_work_sync);
 
 /**
- * schedule_work - put work task in global workqueue
- * @work: job to be done
- *
- * Returns zero if @work was already on the kernel-global workqueue and
- * non-zero otherwise.
- *
- * This puts a job in the kernel-global workqueue if it was not already
- * queued and leaves it in the same position on the kernel-global
- * workqueue otherwise.
- */
-int schedule_work(struct work_struct *work)
-{
-       return queue_work(system_wq, work);
-}
-EXPORT_SYMBOL(schedule_work);
-
-/*
  * schedule_work_on - put work task on a specific cpu
  * @cpu: cpu to put the work task on
  * @work: job to be done
  *
  * This puts a job on a specific cpu
  */
-int schedule_work_on(int cpu, struct work_struct *work)
+bool schedule_work_on(int cpu, struct work_struct *work)
 {
        return queue_work_on(cpu, system_wq, work);
 }
 EXPORT_SYMBOL(schedule_work_on);
 
 /**
- * schedule_delayed_work - put work task in global workqueue after delay
- * @dwork: job to be done
- * @delay: number of jiffies to wait or 0 for immediate execution
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
  *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue.
+ * Returns %false if @work was already on the kernel-global workqueue and
+ * %true otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
-int schedule_delayed_work(struct delayed_work *dwork,
-                                       unsigned long delay)
+bool schedule_work(struct work_struct *work)
 {
-       return queue_delayed_work(system_wq, dwork, delay);
+       return queue_work(system_wq, work);
 }
-EXPORT_SYMBOL(schedule_delayed_work);
+EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
@@ -2931,13 +2949,27 @@ EXPORT_SYMBOL(schedule_delayed_work);
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
-int schedule_delayed_work_on(int cpu,
-                       struct delayed_work *dwork, unsigned long delay)
+bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
+                             unsigned long delay)
 {
        return queue_delayed_work_on(cpu, system_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
+bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
+{
+       return queue_delayed_work(system_wq, dwork, delay);
+}
+EXPORT_SYMBOL(schedule_delayed_work);
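After the reshuffle, the schedule_*() helpers are thin wrappers that feed system_wq through the queue_*() functions above. A hypothetical combined usage sketch; the example_* names are made up:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_tick_fn(struct work_struct *work)
{
	/* ... do one-shot or periodic housekeeping ... */
}

static DECLARE_WORK(example_tick_work, example_tick_fn);
static DECLARE_DELAYED_WORK(example_tick_dwork, example_tick_fn);

static void example_tick(void)
{
	schedule_work(&example_tick_work);		/* run ASAP on system_wq */
	schedule_delayed_work(&example_tick_dwork, HZ);	/* in about a second */
}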
+
 /**
  * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call