diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6eacc1dea8b74c9355117c251c5663bea0e946b7..11097477eeab6591088ca817d4690535e114e699 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -61,10 +61,10 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
-bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
-       return sbitmap_any_bit_set(&hctx->ctx_map) ||
-                       !list_empty_careful(&hctx->dispatch) ||
+       return !list_empty_careful(&hctx->dispatch) ||
+               sbitmap_any_bit_set(&hctx->ctx_map) ||
                        blk_mq_sched_has_work(hctx);
 }
 
@@ -126,7 +126,8 @@ void blk_freeze_queue_start(struct request_queue *q)
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
                percpu_ref_kill(&q->q_usage_counter);
-               blk_mq_run_hw_queues(q, false);
+               if (q->mq_ops)
+                       blk_mq_run_hw_queues(q, false);
        }
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
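
Guarding the queue run with q->mq_ops lets blk_freeze_queue_start() be called for queues that are not driven by blk-mq as well, where there are no hardware queues to kick. A minimal hedged sketch of the usual freeze/unfreeze sequence around a queue reconfiguration (the reconfiguration step itself is illustrative):

        blk_freeze_queue_start(q);      /* kill q_usage_counter; kick hw queues on mq */
        blk_mq_freeze_queue_wait(q);    /* wait for in-flight requests to drain */
        /* ... reconfigure the queue while no new I/O can enter ... */
        blk_mq_unfreeze_queue(q);
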
@@ -256,13 +257,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
        queue_for_each_hw_ctx(q, hctx, i)
                if (blk_mq_hw_queue_mapped(hctx))
                        blk_mq_tag_wakeup_all(hctx->tags, true);
-
-       /*
-        * If we are called because the queue has now been marked as
-        * dying, we need to ensure that processes currently waiting on
-        * the queue are notified as well.
-        */
-       wake_up_all(&q->mq_freeze_wq);
 }
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
@@ -297,6 +291,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        rq->q = data->q;
        rq->mq_ctx = data->ctx;
        rq->cmd_flags = op;
+       if (data->flags & BLK_MQ_REQ_PREEMPT)
+               rq->rq_flags |= RQF_PREEMPT;
        if (blk_queue_io_stat(data->q))
                rq->rq_flags |= RQF_IO_STAT;
        /* do not touch atomic flags, it needs atomic ops against the timer */
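
Propagating BLK_MQ_REQ_PREEMPT into RQF_PREEMPT at allocation time is what allows a request to be issued while the queue only admits preempt requests (for example during SCSI domain validation or runtime PM resume). A hedged sketch of how a caller might ask for such a request; the opcode is purely illustrative:

        struct request *rq;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_PREEMPT);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        /* ... set up and issue the request, then blk_mq_free_request(rq) ... */
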
@@ -387,13 +383,13 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 }
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
-               unsigned int flags)
+               blk_mq_req_flags_t flags)
 {
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
        int ret;
 
-       ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
+       ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);
 
@@ -413,7 +409,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
-               unsigned int op, unsigned int flags, unsigned int hctx_idx)
+       unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
 {
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
@@ -432,7 +428,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        if (hctx_idx >= q->nr_hw_queues)
                return ERR_PTR(-EIO);
 
-       ret = blk_queue_enter(q, true);
+       ret = blk_queue_enter(q, flags);
        if (ret)
                return ERR_PTR(ret);
 
@@ -653,6 +649,8 @@ static void __blk_mq_requeue_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
 
+       blk_mq_put_driver_tag(rq);
+
        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);
        blk_mq_sched_requeue_request(rq);
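
Releasing the driver tag inside __blk_mq_requeue_request() means a requeued request no longer pins a tag while it sits parked, and individual callers no longer have to remember to drop it. A hedged sketch of the common requeue-and-retry pattern built on top of this; the 100 ms delay is illustrative:

        blk_mq_requeue_request(rq, false);              /* park the request; its tag is dropped */
        blk_mq_delay_kick_requeue_list(rq->q, 100);     /* retry the requeue list in ~100 ms */
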
@@ -709,7 +707,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 
        /*
         * We abuse this flag that is otherwise used by the I/O scheduler to
-        * request head insertation from the workqueue.
+        * request head insertion from the workqueue.
         */
        BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
 
@@ -996,105 +994,88 @@ done:
        return rq->tag != -1;
 }
 
-static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
-                                   struct request *rq)
-{
-       blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
-       rq->tag = -1;
-
-       if (rq->rq_flags & RQF_MQ_INFLIGHT) {
-               rq->rq_flags &= ~RQF_MQ_INFLIGHT;
-               atomic_dec(&hctx->nr_active);
-       }
-}
-
-static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
-                                      struct request *rq)
-{
-       if (rq->tag == -1 || rq->internal_tag == -1)
-               return;
-
-       __blk_mq_put_driver_tag(hctx, rq);
-}
-
-static void blk_mq_put_driver_tag(struct request *rq)
+static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
+                               int flags, void *key)
 {
        struct blk_mq_hw_ctx *hctx;
 
-       if (rq->tag == -1 || rq->internal_tag == -1)
-               return;
+       hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
-       hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
-       __blk_mq_put_driver_tag(hctx, rq);
+       list_del_init(&wait->entry);
+       blk_mq_run_hw_queue(hctx, true);
+       return 1;
 }
 
 /*
- * If we fail getting a driver tag because all the driver tags are already
- * assigned and on the dispatch list, BUT the first entry does not have a
- * tag, then we could deadlock. For that case, move entries with assigned
- * driver tags to the front, leaving the set of tagged requests in the
- * same order, and the untagged set in the same order.
+ * Mark us waiting for a tag. For shared tags, this involves hooking us into
+ * the tag wakeups. For non-shared tags, we can simply mark us needing a
+ * restart. For both cases, take care to check the condition again after
+ * marking us as waiting.
  */
-static bool reorder_tags_to_front(struct list_head *list)
+static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
+                                struct request *rq)
 {
-       struct request *rq, *tmp, *first = NULL;
-
-       list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
-               if (rq == first)
-                       break;
-               if (rq->tag != -1) {
-                       list_move(&rq->queuelist, list);
-                       if (!first)
-                               first = rq;
-               }
-       }
-
-       return first != NULL;
-}
-
-static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
-                               void *key)
-{
-       struct blk_mq_hw_ctx *hctx;
+       struct blk_mq_hw_ctx *this_hctx = *hctx;
+       bool shared_tags = (this_hctx->flags & BLK_MQ_F_TAG_SHARED) != 0;
+       struct sbq_wait_state *ws;
+       wait_queue_entry_t *wait;
+       bool ret;
 
-       hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
+       if (!shared_tags) {
+               if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
+                       set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
+       } else {
+               wait = &this_hctx->dispatch_wait;
+               if (!list_empty_careful(&wait->entry))
+                       return false;
 
-       list_del(&wait->entry);
-       clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
-       blk_mq_run_hw_queue(hctx, true);
-       return 1;
-}
+               spin_lock(&this_hctx->lock);
+               if (!list_empty(&wait->entry)) {
+                       spin_unlock(&this_hctx->lock);
+                       return false;
+               }
 
-static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
-{
-       struct sbq_wait_state *ws;
+               ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
+               add_wait_queue(&ws->wait, wait);
+       }
 
        /*
-        * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
-        * The thread which wins the race to grab this bit adds the hardware
-        * queue to the wait queue.
+        * It's possible that a tag was freed in the window between the
+        * allocation failure and adding the hardware queue to the wait
+        * queue.
         */
-       if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
-           test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
-               return false;
+       ret = blk_mq_get_driver_tag(rq, hctx, false);
 
-       init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
-       ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
+       if (!shared_tags) {
+               /*
+                * Don't clear RESTART here, someone else could have set it.
+                * At most this will cost an extra queue run.
+                */
+               return ret;
+       } else {
+               if (!ret) {
+                       spin_unlock(&this_hctx->lock);
+                       return false;
+               }
 
-       /*
-        * As soon as this returns, it's no longer safe to fiddle with
-        * hctx->dispatch_wait, since a completion can wake up the wait queue
-        * and unlock the bit.
-        */
-       add_wait_queue(&ws->wait, &hctx->dispatch_wait);
-       return true;
+               /*
+                * We got a tag, remove ourselves from the wait queue to ensure
+                * someone else gets the wakeup.
+                */
+               spin_lock_irq(&ws->wait.lock);
+               list_del_init(&wait->entry);
+               spin_unlock_irq(&ws->wait.lock);
+               spin_unlock(&this_hctx->lock);
+               return true;
+       }
 }
 
 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
-               bool got_budget)
+                            bool got_budget)
 {
        struct blk_mq_hw_ctx *hctx;
        struct request *rq, *nxt;
+       bool no_tag = false;
        int errors, queued;
 
        if (list_empty(list))
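
The replacement for the TAG_WAITING bit hinges on the callback-style waitqueue entry: hctx->dispatch_wait carries a wake function (blk_mq_dispatch_wake) rather than a sleeping task, so freeing a tag kicks the hardware queue directly. A hedged, self-contained sketch of that generic pattern with hypothetical names (my_ctx, my_wake, the work item); it needs <linux/wait.h> and <linux/workqueue.h>:

        struct my_ctx {
                wait_queue_entry_t wait;
                struct work_struct work;
        };

        static int my_wake(wait_queue_entry_t *wait, unsigned mode,
                           int flags, void *key)
        {
                struct my_ctx *ctx = container_of(wait, struct my_ctx, wait);

                list_del_init(&wait->entry);    /* one-shot: detach ourselves */
                schedule_work(&ctx->work);      /* do the real work elsewhere */
                return 1;
        }

        /* Arm the callback; wq_head is whatever waitqueue hands out the resource. */
        init_waitqueue_func_entry(&ctx->wait, my_wake);
        add_wait_queue(&wq_head, &ctx->wait);
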
@@ -1112,33 +1093,30 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
                rq = list_first_entry(list, struct request, queuelist);
                if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
-                       if (!queued && reorder_tags_to_front(list))
-                               continue;
-
                        /*
                         * The initial allocation attempt failed, so we need to
-                        * rerun the hardware queue when a tag is freed.
-                        */
-                       if (!blk_mq_dispatch_wait_add(hctx)) {
-                               if (got_budget)
-                                       blk_mq_put_dispatch_budget(hctx);
-                               break;
-                       }
-
-                       /*
-                        * It's possible that a tag was freed in the window
-                        * between the allocation failure and adding the
-                        * hardware queue to the wait queue.
+                        * rerun the hardware queue when a tag is freed. The
+                        * waitqueue takes care of that. If the queue is run
+                        * before we add this entry back on the dispatch list,
+                        * we'll re-run it below.
                         */
-                       if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
+                       if (!blk_mq_mark_tag_wait(&hctx, rq)) {
                                if (got_budget)
                                        blk_mq_put_dispatch_budget(hctx);
+                               /*
+                                * For non-shared tags, the RESTART check
+                                * will suffice.
+                                */
+                               if (hctx->flags & BLK_MQ_F_TAG_SHARED)
+                                       no_tag = true;
                                break;
                        }
                }
 
-               if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
+               if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
+                       blk_mq_put_driver_tag(rq);
                        break;
+               }
 
                list_del_init(&rq->queuelist);
 
@@ -1159,13 +1137,13 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                if (ret == BLK_STS_RESOURCE) {
                        /*
                         * If an I/O scheduler has been configured and we got a
-                        * driver tag for the next request already, free it again.
+                        * driver tag for the next request already, free it
+                        * again.
                         */
                        if (!list_empty(list)) {
                                nxt = list_first_entry(list, struct request, queuelist);
                                blk_mq_put_driver_tag(nxt);
                        }
-                       blk_mq_put_driver_tag_hctx(hctx, rq);
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
@@ -1196,10 +1174,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                 * it is no longer set that means that it was cleared by another
                 * thread and hence that a queue rerun is needed.
                 *
-                * If TAG_WAITING is set that means that an I/O scheduler has
-                * been configured and another thread is waiting for a driver
-                * tag. To guarantee fairness, do not rerun this hardware queue
-                * but let the other thread grab the driver tag.
+                * If 'no_tag' is set, that means that we failed getting
+                * a driver tag with an I/O scheduler attached. If our dispatch
+                * waitqueue is no longer active, ensure that we run the queue
+                * AFTER adding our entries back to the list.
                 *
                 * If no I/O scheduler has been configured it is possible that
                 * the hardware queue got stopped and restarted before requests
@@ -1211,8 +1189,8 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
                 *   and dm-rq.
                 */
-               if (!blk_mq_sched_needs_restart(hctx) &&
-                   !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
+               if (!blk_mq_sched_needs_restart(hctx) ||
+                   (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
                        blk_mq_run_hw_queue(hctx, true);
        }
 
@@ -1305,9 +1283,14 @@ void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
 
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
-       __blk_mq_delay_run_hw_queue(hctx, async, 0);
+       if (blk_mq_hctx_has_pending(hctx)) {
+               __blk_mq_delay_run_hw_queue(hctx, async, 0);
+               return true;
+       }
+
+       return false;
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
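
With the pending check folded into blk_mq_run_hw_queue() itself, speculative callers no longer schedule pointless runs, and the new bool return reports whether a run was actually queued. A hedged caller sketch; the debug message is illustrative, not taken from this patch:

        /* Kick the queue only if it actually has work; note whether it did. */
        bool ran = blk_mq_run_hw_queue(hctx, true);

        if (!ran)
                pr_debug("hctx %u had no pending work\n", hctx->queue_num);
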
 
@@ -1317,8 +1300,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               if (!blk_mq_hctx_has_pending(hctx) ||
-                   blk_mq_hctx_stopped(hctx))
+               if (blk_mq_hctx_stopped(hctx))
                        continue;
 
                blk_mq_run_hw_queue(hctx, async);
@@ -1492,7 +1474,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq)
+void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
@@ -1501,7 +1483,8 @@ void blk_mq_request_bypass_insert(struct request *rq)
        list_add_tail(&rq->queuelist, &hctx->dispatch);
        spin_unlock(&hctx->lock);
 
-       blk_mq_run_hw_queue(hctx, false);
+       if (run_queue)
+               blk_mq_run_hw_queue(hctx, false);
 }
 
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
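
The new run_queue argument lets a caller batch several bypass insertions and kick the hardware queues once at the end rather than once per request. A hedged sketch of that batching pattern; the list and variable names are illustrative:

        struct request *rq, *next;

        list_for_each_entry_safe(rq, next, &local_list, queuelist) {
                list_del_init(&rq->queuelist);
                blk_mq_request_bypass_insert(rq, false);        /* don't run the queue yet */
        }
        blk_mq_run_hw_queues(q, false); /* one run for the whole batch */
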
@@ -1729,13 +1712,10 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        if (unlikely(is_flush_fua)) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
-               if (q->elevator) {
-                       blk_mq_sched_insert_request(rq, false, true, true,
-                                       true);
-               } else {
-                       blk_insert_flush(rq);
-                       blk_mq_run_hw_queue(data.hctx, true);
-               }
+
+               /* bypass scheduler for flush rq */
+               blk_insert_flush(rq);
+               blk_mq_run_hw_queue(data.hctx, true);
        } else if (plug && q->nr_hw_queues == 1) {
                struct request *last = NULL;
 
@@ -2067,7 +2047,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
         * Allocate space for all possible cpus to avoid allocation at
         * runtime
         */
-       hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+       hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
                                        GFP_KERNEL, node);
        if (!hctx->ctxs)
                goto unregister_cpu_notifier;
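
kmalloc_array_node() is the NUMA-aware form of kmalloc_array(): it returns NULL if the count-times-size computation would overflow, which an open-coded multiplication passed to kmalloc_node() does not catch. A hedged illustration with a hypothetical table, not taken from this patch:

        struct entry **table;

        table = kmalloc_array_node(nr_entries, sizeof(*table),
                                   GFP_KERNEL, node);
        if (!table)
                return -ENOMEM;
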
@@ -2078,6 +2058,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
        hctx->nr_ctx = 0;
 
+       init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
+       INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
+
        if (set->ops->init_hctx &&
            set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
                goto free_bitmap;
@@ -2317,8 +2300,11 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 
        mutex_lock(&set->tag_list_lock);
 
-       /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
-       if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+       /*
+        * Check to see if we're transitioning to shared (from 1 to 2 queues).
+        */
+       if (!list_empty(&set->tag_list) &&
+           !(set->flags & BLK_MQ_F_TAG_SHARED)) {
                set->flags |= BLK_MQ_F_TAG_SHARED;
                /* update existing queue */
                blk_mq_update_tag_set_depth(set, true);
@@ -2550,10 +2536,9 @@ static void blk_mq_queue_reinit(struct request_queue *q)
 
        /*
         * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
-        * we should change hctx numa_node according to new topology (this
-        * involves free and re-allocate memory, worthy doing?)
+        * we should change hctx numa_node according to the new topology (this
+        * involves freeing and re-allocating memory, worth doing?)
         */
-
        blk_mq_map_swqueue(q);
 
        blk_mq_sysfs_register(q);