block: get rid of q->softirq_done_fn()
author		Jens Axboe <axboe@kernel.dk>
		Wed, 31 Oct 2018 15:43:30 +0000 (09:43 -0600)
committer	Jens Axboe <axboe@kernel.dk>
		Wed, 7 Nov 2018 20:42:33 +0000 (13:42 -0700)
With the legacy request path gone, all completion handling funnels through
the mq_ops->complete() operation, so get rid of the q->softirq_done_fn
indirection and call the op directly.

Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
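
The driver-visible contract does not change: a blk-mq driver still supplies
its completion handler through the .complete member of struct blk_mq_ops.
What goes away is the per-queue mirror of that pointer (q->softirq_done_fn)
and the blk_queue_softirq_done() setter that installed it at init time. A
minimal sketch of the before/after call shape, condensed from the hunks below:

	/* Before: the core mirrored the op into the queue at init time ... */
	q->softirq_done_fn = set->ops->complete;	/* via blk_queue_softirq_done() */
	/* ... and completion sites called the mirror. */
	rq->q->softirq_done_fn(rq);

	/* After: completion sites call the op directly. */
	rq->q->mq_ops->complete(rq);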
block/blk-mq.c
block/blk-settings.c
block/blk-softirq.c
include/linux/blk-mq.h
include/linux/blkdev.h

block/blk-mq.c
index b49f5bd86f420d34457bad65db521c8108eb5e52..5e7982918c54c9c7cf7304fcd434559de9df0309 100644
@@ -546,13 +546,15 @@ EXPORT_SYMBOL(blk_mq_end_request);
 static void __blk_mq_complete_request_remote(void *data)
 {
        struct request *rq = data;
+       struct request_queue *q = rq->q;
 
-       rq->q->softirq_done_fn(rq);
+       q->mq_ops->complete(rq);
 }
 
 static void __blk_mq_complete_request(struct request *rq)
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
+       struct request_queue *q = rq->q;
        bool shared = false;
        int cpu;
 
@@ -568,18 +570,18 @@ static void __blk_mq_complete_request(struct request *rq)
         * So complete IO request in softirq context in case of single queue
         * for not degrading IO performance by irqsoff latency.
         */
-       if (rq->q->nr_hw_queues == 1) {
+       if (q->nr_hw_queues == 1) {
                __blk_complete_request(rq);
                return;
        }
 
-       if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
-               rq->q->softirq_done_fn(rq);
+       if (!test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
+               q->mq_ops->complete(rq);
                return;
        }
 
        cpu = get_cpu();
-       if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+       if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);
 
        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -588,7 +590,7 @@ static void __blk_mq_complete_request(struct request *rq)
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
-               rq->q->softirq_done_fn(rq);
+               q->mq_ops->complete(rq);
        }
        put_cpu();
 }
@@ -2701,9 +2703,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
         */
        q->poll_nsec = -1;
 
-       if (set->ops->complete)
-               blk_queue_softirq_done(q, set->ops->complete);
-
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_add_queue_tag_set(set, q);
        blk_mq_map_swqueue(q);
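
Read as a whole rather than as hunks, the completion dispatch in
__blk_mq_complete_request() now looks like the sketch below. The two rq->csd
assignments are elided by the diff above and are filled in here from the
surrounding mainline context; everything else is taken from the hunks:

	/* Single hw queue: defer to BLOCK_SOFTIRQ to avoid irqs-off latency. */
	if (q->nr_hw_queues == 1) {
		__blk_complete_request(rq);
		return;
	}

	/* Same-CPU completion not requested: complete right here. */
	if (!test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
		q->mq_ops->complete(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;	/* elided context */
		rq->csd.info = rq;					/* elided context */
		rq->csd.flags = 0;
		/* IPI the submission CPU; the handler invokes ->complete there. */
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		q->mq_ops->complete(rq);
	}
	put_cpu();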
block/blk-settings.c
index e3f07d94b18d8d7e828079e1aae058a9a0f339dc..cca83590a1dcbfca30eddb417121ed9c75cad66e 100644
@@ -20,12 +20,6 @@ EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
 
-void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
-{
-       q->softirq_done_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_softirq_done);
-
 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 {
        q->rq_timeout = timeout;
block/blk-softirq.c
index 8ca0f6caf174c253e5186b31d891cfa40d878177..727d64436ec4a2bb00477af835e0859d1ab924b4 100644
@@ -34,7 +34,7 @@ static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 
                rq = list_entry(local_list.next, struct request, ipi_list);
                list_del_init(&rq->ipi_list);
-               rq->q->softirq_done_fn(rq);
+               rq->q->mq_ops->complete(rq);
        }
 }
 
@@ -102,7 +102,7 @@ void __blk_complete_request(struct request *req)
        unsigned long flags;
        bool shared = false;
 
-       BUG_ON(!q->softirq_done_fn);
+       BUG_ON(!q->mq_ops->complete);
 
        local_irq_save(flags);
        cpu = smp_processor_id();
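
Note that __blk_complete_request() still does not invoke ->complete
synchronously: it appends the request to a per-CPU completion list and raises
BLOCK_SOFTIRQ, and blk_done_softirq() above drains that list. Roughly, per
the mainline code of this era (condensed, not part of this patch):

	struct list_head *list = this_cpu_ptr(&blk_cpu_done);

	list_add_tail(&req->ipi_list, list);
	/* Only the first entry on the list needs to raise the softirq. */
	if (list->next == &req->ipi_list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);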
include/linux/blk-mq.h
index 5c8418ebbfd6df6491ac7b4c3f871f989b96a47f..9dd574e5436a3e9770ffe65c935b048d50088bd3 100644
@@ -115,6 +115,7 @@ typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
 typedef bool (busy_fn)(struct request_queue *);
+typedef void (complete_fn)(struct request *);
 
 
 struct blk_mq_ops {
@@ -142,7 +143,7 @@ struct blk_mq_ops {
         */
        poll_fn                 *poll;
 
-       softirq_done_fn         *complete;
+       complete_fn             *complete;
 
        /*
         * Called when the block layer side of a hardware queue has been
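
With softirq_done_fn gone from blkdev.h (below), complete_fn is the only
completion handler type left. A hypothetical driver (the mydrv_* names are
invented for illustration, not part of this patch) wires it up as follows;
its IRQ path would call blk_mq_complete_request(), which funnels into
->complete via the paths patched above:

	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
	{
		blk_mq_start_request(bd->rq);
		/*
		 * ... issue to hardware; the completion IRQ later calls
		 * blk_mq_complete_request(bd->rq) ...
		 */
		return BLK_STS_OK;
	}

	/* Prototype must match complete_fn: void (struct request *). */
	static void mydrv_complete_rq(struct request *rq)
	{
		blk_mq_end_request(rq, BLK_STS_OK);	/* real drivers derive the status */
	}

	static const struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq	= mydrv_queue_rq,
		.complete	= mydrv_complete_rq,
	};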
include/linux/blkdev.h
index c675e2b5af620b99a182cccee8670c38917cd304..d4104844d6bbdeb517fb8c93b793ef513c074304 100644
@@ -290,7 +290,6 @@ typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
 
 struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 
 enum blk_eh_timer_return {
@@ -407,7 +406,6 @@ struct request_queue {
 
        make_request_fn         *make_request_fn;
        poll_q_fn               *poll_fn;
-       softirq_done_fn         *softirq_done_fn;
        dma_drain_needed_fn     *dma_drain_needed;
 
        const struct blk_mq_ops *mq_ops;
@@ -1113,7 +1111,6 @@ extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);