block: remove QUEUE_FLAG_BYPASS and ->bypass
Author: Christoph Hellwig <hch@lst.de>
Wed, 14 Nov 2018 16:02:04 +0000 (17:02 +0100)
Committer: Jens Axboe <axboe@kernel.dk>
Thu, 15 Nov 2018 19:13:15 +0000 (12:13 -0700)
Unused since the removal of the legacy request code.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-cgroup.c
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-throttle.c
include/linux/blk-cgroup.h
include/linux/blkdev.h

index 6c65791bc3fee62ee7f9ed2612b14da2963f0467..a95cddb39f1c5cde50f3a2f6e23072df93b4ea37 100644 (file)
@@ -270,13 +270,6 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);
 
-       /*
-        * This could be the first entry point of blkcg implementation and
-        * we shouldn't allow anything to go through for a bypassing queue.
-        */
-       if (unlikely(blk_queue_bypass(q)))
-               return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
                return blkg;
@@ -741,14 +734,6 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 
        if (!blkcg_policy_enabled(q, pol))
                return ERR_PTR(-EOPNOTSUPP);
-
-       /*
-        * This could be the first entry point of blkcg implementation and
-        * we shouldn't allow anything to go through for a bypassing queue.
-        */
-       if (unlikely(blk_queue_bypass(q)))
-               return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
        return __blkg_lookup(blkcg, q, true /* update_hint */);
 }
 
index fdc0ad2686c4b32f50c8466f7c0ad5f6aba2d3d8..1c9b6975cf0a9c2df6c664009d01bfd0e1da6d28 100644 (file)
@@ -370,18 +370,6 @@ void blk_cleanup_queue(struct request_queue *q)
        blk_set_queue_dying(q);
        spin_lock_irq(lock);
 
-       /*
-        * A dying queue is permanently in bypass mode till released.  Note
-        * that, unlike blk_queue_bypass_start(), we aren't performing
-        * synchronize_rcu() after entering bypass mode to avoid the delay
-        * as some drivers create and destroy a lot of queues while
-        * probing.  This is still safe because blk_release_queue() will be
-        * called only after the queue refcnt drops to zero and nothing,
-        * RCU or not, would be traversing the queue by then.
-        */
-       q->bypass_depth++;
-       queue_flag_set(QUEUE_FLAG_BYPASS, q);
-
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DYING, q);
@@ -589,15 +577,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 
        q->queue_lock = lock ? : &q->__queue_lock;
 
-       /*
-        * A queue starts its life with bypass turned on to avoid
-        * unnecessary bypass on/off overhead and nasty surprises during
-        * init.  The initial bypass will be finished when the queue is
-        * registered by blk_register_queue().
-        */
-       q->bypass_depth = 1;
-       queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
-
        init_waitqueue_head(&q->mq_freeze_wq);
 
        /*
index f021f4817b80e6f536176b1e9650b14a1c49c207..a32bb79d6c95de7aba70c58653b77eb0349e9409 100644 (file)
@@ -114,7 +114,6 @@ static int queue_pm_only_show(void *data, struct seq_file *m)
 static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(DYING),
-       QUEUE_FLAG_NAME(BYPASS),
        QUEUE_FLAG_NAME(BIDI),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
index db1a3a2ae00617fbe1e4804bbfd327e37ce55737..8e6f3c9821c26d395878ad4950c4e28d6e72d89b 100644 (file)
@@ -2145,9 +2145,6 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 
        throtl_update_latency_buckets(td);
 
-       if (unlikely(blk_queue_bypass(q)))
-               goto out_unlock;
-
        blk_throtl_assoc_bio(tg, bio);
        blk_throtl_update_idletime(tg);
 
index 1b299e025e8307dd5291f174f3a12a6a03fb598f..2c68efc603bdfc4643f53ae238aa135eff1844d7 100644 (file)
@@ -325,16 +325,12 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
  * @q: request_queue of interest
  *
  * Lookup blkg for the @blkcg - @q pair.  This function should be called
- * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
- * - see blk_queue_bypass_start() for details.
+ * under RCU read lock.
  */
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
 {
        WARN_ON_ONCE(!rcu_read_lock_held());
-
-       if (unlikely(blk_queue_bypass(q)))
-               return NULL;
        return __blkg_lookup(blkcg, q, false);
 }
 
index c961329be96b2348e79faaa05d35798203e84397..dd1e53fd4acfc53cb114a9db045fdfa132369d48 100644 (file)
@@ -548,7 +548,6 @@ struct request_queue {
 
        struct mutex            sysfs_lock;
 
-       int                     bypass_depth;
        atomic_t                mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
@@ -586,7 +585,6 @@ struct request_queue {
 
 #define QUEUE_FLAG_STOPPED     1       /* queue is stopped */
 #define QUEUE_FLAG_DYING       2       /* queue being torn down */
-#define QUEUE_FLAG_BYPASS      3       /* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI                4       /* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     5      /* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP   6       /* complete on same CPU-group */
@@ -630,7 +628,6 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q)     test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q)      test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q)    test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q) \