block: extend queue_flag bitops
author     Jens Axboe <jens.axboe@oracle.com>
           Thu, 3 Jul 2008 11:18:54 +0000 (13:18 +0200)
committer  Jens Axboe <jens.axboe@oracle.com>
           Thu, 3 Jul 2008 11:21:15 +0000 (13:21 +0200)
Add queue_flag_test_and_clear() and queue_flag_test_and_set(), and use
them to replace the open-coded test_bit() + __set_bit()/__clear_bit()
sequences in blk-core.c.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
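
A sketch of the intended calling pattern (hypothetical caller, not part
of this patch): both helpers use the non-atomic __set_bit()/__clear_bit(),
so q->queue_lock must be held, which is what the
WARN_ON_ONCE(!queue_is_locked(q)) checks in the new helpers enforce.

/* Hypothetical caller, for illustration only. */
static void example_plug(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        /* Returns the old bit value: 0 means we set the flag ourselves. */
        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q))
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
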
block/blk-core.c
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index e0fb0bcc0c17dc240ebae88710608c1f1be3a574..dbc7f42b5d2bba5c252137923ec66d3beef482e1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -205,8 +205,7 @@ void blk_plug_device(struct request_queue *q)
        if (blk_queue_stopped(q))
                return;
 
-       if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-               __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+       if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
        }
@@ -221,10 +220,9 @@ int blk_remove_plug(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
-       if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+       if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
                return 0;
 
-       queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
        del_timer(&q->unplug_timer);
        return 1;
 }
@@ -328,8 +326,7 @@ void blk_start_queue(struct request_queue *q)
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
         */
-       if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-               queue_flag_set(QUEUE_FLAG_REENTER, q);
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
@@ -394,8 +391,7 @@ void __blk_run_queue(struct request_queue *q)
         * handling reinvoke the handler shortly if we already got there.
         */
        if (!elv_queue_empty(q)) {
-               if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-                       queue_flag_set(QUEUE_FLAG_REENTER, q);
+               if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                        q->request_fn(q);
                        queue_flag_clear(QUEUE_FLAG_REENTER, q);
                } else {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ff9d0bdf2a1613b10f45383e2879c9fecbd7929d..e04c4ac8a7cf8ba83348af1a47f370acab422d60 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -428,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
        __set_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_flag_test_and_clear(unsigned int flag,
+                                           struct request_queue *q)
+{
+       WARN_ON_ONCE(!queue_is_locked(q));
+
+       if (test_bit(flag, &q->queue_flags)) {
+               __clear_bit(flag, &q->queue_flags);
+               return 1;
+       }
+
+       return 0;
+}
+
+static inline int queue_flag_test_and_set(unsigned int flag,
+                                         struct request_queue *q)
+{
+       WARN_ON_ONCE(!queue_is_locked(q));
+
+       if (!test_bit(flag, &q->queue_flags)) {
+               __set_bit(flag, &q->queue_flags);
+               return 0;
+       }
+
+       return 1;
+}
+
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
        WARN_ON_ONCE(!queue_is_locked(q));
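
The return-value convention mirrors the atomic test_and_set_bit() and
test_and_clear_bit(): both helpers return the bit's previous state. A
minimal userspace model of that convention (illustrative only, not
kernel code):

#include <stdio.h>

static unsigned long queue_flags;       /* stand-in for q->queue_flags */

/* Returns 1 if the bit was already set, 0 if we just set it. */
static int model_test_and_set(unsigned int flag)
{
        if (!(queue_flags & (1UL << flag))) {
                queue_flags |= 1UL << flag;
                return 0;
        }
        return 1;
}

/* Returns 1 if the bit was set (and we cleared it), 0 otherwise. */
static int model_test_and_clear(unsigned int flag)
{
        if (queue_flags & (1UL << flag)) {
                queue_flags &= ~(1UL << flag);
                return 1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", model_test_and_set(0));   /* 0: flag was clear */
        printf("%d\n", model_test_and_set(0));   /* 1: already set */
        printf("%d\n", model_test_and_clear(0)); /* 1: was set, cleared */
        printf("%d\n", model_test_and_clear(0)); /* 0: already clear */
        return 0;
}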