block: Introduce blk_queue_flag_{set,clear,test_and_{set,clear}}()
author: Bart Van Assche <bart.vanassche@wdc.com>
Thu, 8 Mar 2018 01:10:04 +0000 (17:10 -0800)
committer: Jens Axboe <axboe@kernel.dk>
Thu, 8 Mar 2018 21:13:48 +0000 (14:13 -0700)
Introduce functions that modify the queue flags and that protect
these modifications with the request queue lock. Except for moving
one wake_up_all() call from inside to outside a critical section,
this patch does not change any functionality.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c
block/blk-settings.c
block/blk-sysfs.c
block/blk-timeout.c
include/linux/blkdev.h

index 241b730886174793ab7a4ddae3dbb3cbd09f7f68..74c6283f45092afda52bab6873754048a44c7806 100644 (file)
@@ -71,6 +71,78 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
+/**
+ * blk_queue_flag_set - atomically set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ */
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       queue_flag_set(flag, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_set);
+
+/**
+ * blk_queue_flag_clear - atomically clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ */
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       queue_flag_clear(flag, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_clear);
+
+/**
+ * blk_queue_flag_test_and_set - atomically test and set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ *
+ * Returns the previous value of @flag - 0 if the flag was not set and 1 if
+ * the flag was already set.
+ */
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
+{
+       unsigned long flags;
+       bool res;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       res = queue_flag_test_and_set(flag, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
+
+/**
+ * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ *
+ * Returns the previous value of @flag - 0 if the flag was not set and 1 if
+ * the flag was set.
+ */
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
+{
+       unsigned long flags;
+       bool res;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       res = queue_flag_test_and_clear(flag, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
+
 static void blk_clear_congested(struct request_list *rl, int sync)
 {
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -361,25 +433,14 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 int blk_set_preempt_only(struct request_queue *q)
 {
-       unsigned long flags;
-       int res;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-       return res;
+       return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
 }
 EXPORT_SYMBOL_GPL(blk_set_preempt_only);
 
 void blk_clear_preempt_only(struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+       blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
        wake_up_all(&q->mq_freeze_wq);
-       spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
 
@@ -629,9 +690,7 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-       spin_lock_irq(q->queue_lock);
-       queue_flag_set(QUEUE_FLAG_DYING, q);
-       spin_unlock_irq(q->queue_lock);
+       blk_queue_flag_set(QUEUE_FLAG_DYING, q);
 
        /*
         * When queue DYING flag is set, we need to block new req
index e70cc7d48f582c58fca9c6d1647dbfe7462b7e2d..a868990226839a34aee533bba3367bce5ab8d216 100644 (file)
@@ -194,11 +194,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
  */
 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       queue_flag_set(QUEUE_FLAG_QUIESCED, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
@@ -239,11 +235,7 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
 
        /* dispatch requests which are inserted during quiescing */
        blk_mq_run_hw_queues(q, true);
index 7f719da0eaddc178833db8b5244dc6f6c7c25f3a..d1de71124656a911e09af299f34d0aad87b94fca 100644 (file)
@@ -859,12 +859,10 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-       spin_lock_irq(q->queue_lock);
        if (queueable)
-               queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
+               blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
        else
-               queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
index fd71a00c9462a0a2c1f210e30ce85b9d3275b573..d00d1b0ec1099b54e9f0934cdd34d392a77ed559 100644 (file)
@@ -276,12 +276,10 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
-       spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
-               queue_flag_set(QUEUE_FLAG_##flag, q);                   \
+               blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
-               queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
-       spin_unlock_irq(q->queue_lock);                                 \
+               blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
 }
 
@@ -414,12 +412,10 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
        if (ret < 0)
                return ret;
 
-       spin_lock_irq(q->queue_lock);
        if (poll_on)
-               queue_flag_set(QUEUE_FLAG_POLL, q);
+               blk_queue_flag_set(QUEUE_FLAG_POLL, q);
        else
-               queue_flag_clear(QUEUE_FLAG_POLL, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
 
        return ret;
 }
@@ -487,12 +483,10 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
        if (set == -1)
                return -EINVAL;
 
-       spin_lock_irq(q->queue_lock);
        if (set)
-               queue_flag_set(QUEUE_FLAG_WC, q);
+               blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
-               queue_flag_clear(QUEUE_FLAG_WC, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 
        return count;
 }
@@ -946,9 +940,7 @@ void blk_unregister_queue(struct gendisk *disk)
         */
        mutex_lock(&q->sysfs_lock);
 
-       spin_lock_irq(q->queue_lock);
-       queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
-       spin_unlock_irq(q->queue_lock);
+       blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
 
        /*
         * Remove the sysfs attributes before unregistering the queue data
index a05e3676d24a262992729201036c7e8d4eb1d9e6..34a55250f08ab31b07b5b0f397207402da83b763 100644 (file)
@@ -57,12 +57,10 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
                char *p = (char *) buf;
 
                val = simple_strtoul(p, &p, 10);
-               spin_lock_irq(q->queue_lock);
                if (val)
-                       queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+                       blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
                else
-                       queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
-               spin_unlock_irq(q->queue_lock);
+                       blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
        }
 
        return count;
index c351aaec3ca7a11838f26dcf8898b83e00f99719..f84b3c7887b101547899e4a782f21f31606ce99f 100644 (file)
@@ -707,6 +707,11 @@ struct request_queue {
                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
                                 (1 << QUEUE_FLAG_POLL))
 
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
+
 /*
  * @q->queue_lock is set while a queue is being initialized. Since we know
  * that no other threads access the queue object before @q->queue_lock has