block: locking fixes (for-2.6.38/multiqueue)
author Jens Axboe <jaxboe@fusionio.com>
Mon, 15 Nov 2010 11:03:51 +0000 (12:03 +0100)
committer Jens Axboe <jaxboe@fusionio.com>
Mon, 15 Nov 2010 12:34:46 +0000 (13:34 +0100)
Some of this code will want to be reordered, especially around the
plug handling.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
block/blk-core.c
block/blk-exec.c
block/elevator.c
include/linux/blk-mq.h

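The diffs below all revolve around one locking pattern: a caller that holds the per-context lock (ctx->lock) never nests the queue lock (q->queue_lock) inside it; instead it hands off from one to the other through the queue_ctx_lock_queue()/queue_ctx_unlock_queue() helpers added at the bottom of this patch, so only one of the two spinlocks is held at any moment. As a rough illustration only, here is a minimal userspace model of that handoff; the pthread spinlocks and the names switch_to_queue_lock()/switch_to_ctx_lock() are invented for the sketch and are not kernel APIs.

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t ctx_lock;    /* stand-in for blk_queue_ctx->lock       */
static pthread_spinlock_t queue_lock;  /* stand-in for request_queue->queue_lock */

/* Mirrors queue_ctx_lock_queue(): drop the per-ctx lock, then take the queue lock. */
static void switch_to_queue_lock(void)
{
	pthread_spin_unlock(&ctx_lock);
	pthread_spin_lock(&queue_lock);
}

/* Mirrors queue_ctx_unlock_queue(): drop the queue lock, then retake the ctx lock. */
static void switch_to_ctx_lock(void)
{
	pthread_spin_unlock(&queue_lock);
	pthread_spin_lock(&ctx_lock);
}

int main(void)
{
	pthread_spin_init(&ctx_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&queue_lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&ctx_lock);   /* per-ctx work, e.g. adding a request       */
	switch_to_queue_lock();         /* queue-level work, e.g. unplugging/running */
	switch_to_ctx_lock();           /* back under the per-ctx lock               */
	pthread_spin_unlock(&ctx_lock);

	pthread_spin_destroy(&ctx_lock);
	pthread_spin_destroy(&queue_lock);
	puts("lock handoff exercised; the two locks were never held together");
	return 0;
}

Because the ctx lock is always dropped before q->queue_lock is taken (and vice versa), no ordering relationship between the two locks is ever established, which is what allows per-ctx submission to proceed without touching the queue lock.
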
diff --git a/block/blk-core.c b/block/blk-core.c
index 2127f5973f44499974e0b1aa134451fb7d6cadaa..06e92db9d674777a217663033bd999b0b65b86a9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -396,6 +396,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q)
 {
+       assert_spin_locked(q->queue_lock);
+
        blk_remove_plug(q);
 
        if (unlikely(blk_queue_stopped(q)))
@@ -1295,9 +1297,11 @@ get_rq:
        __elv_add_request(ctx, req, where, 0);
 out:
        spin_unlock_irq(&ctx->lock);
-       if (unplug || !queue_should_plug(q))
+       if (unplug || !queue_should_plug(q)) {
+               spin_lock_irq(q->queue_lock);
                __generic_unplug_device(q);
-       spin_unlock_irq(q->queue_lock);
+               spin_unlock_irq(q->queue_lock);
+       }
        return 0;
 }
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f16e6667da063ce9a56da68170783ad6f6c76e35..9445d915fb0e1031caa513a0e69bf5e22cf3ed44 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -54,8 +54,11 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        rq->rq_disk = bd_disk;
        rq->end_io = done;
        WARN_ON(irqs_disabled());
-       spin_lock_irq(q->queue_lock);
+       spin_lock_irq(&ctx->lock);
        __elv_add_request(ctx, rq, where, 1);
+       spin_unlock(&ctx->lock);
+
+       spin_lock(q->queue_lock);
        __generic_unplug_device(q);
        /* the queue is stopped so it won't be plugged+unplugged */
        if (rq->cmd_type == REQ_TYPE_PM_RESUME)
diff --git a/block/elevator.c b/block/elevator.c
index 22bc0b7c80f325a6017ca373a5274f8020b29f01..aef735dc1a4be90af29be7948c9ee4098237cab4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -692,7 +692,13 @@ void elv_insert(struct blk_queue_ctx *ctx, struct request *rq, int where)
 
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;
+               queue_ctx_lock_queue(q, ctx);
+
                list_add(&rq->queuelist, &q->queue_head);
+               if (unplug_it && blk_queue_plugged(q) && !queue_in_flight(q))
+                       __generic_unplug_device(q);
+
+               queue_ctx_unlock_queue(q, ctx);
                break;
 
        case ELEVATOR_INSERT_BACK:
@@ -709,7 +715,9 @@ void elv_insert(struct blk_queue_ctx *ctx, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
+               queue_ctx_lock_queue(q, ctx);
                __blk_run_queue(q);
+               queue_ctx_unlock_queue(q, ctx);
                break;
 
        case ELEVATOR_INSERT_SORT:
@@ -729,6 +737,12 @@ void elv_insert(struct blk_queue_ctx *ctx, struct request *rq, int where)
                 * elevator_add_req_fn.
                 */
                ctx->queue->elevator->ops->elevator_add_req_fn(ctx, rq);
+
+               if (unplug_it && blk_queue_plugged(q) && !queue_in_flight(q)) {
+                       queue_ctx_lock_queue(q, ctx);
+                       __generic_unplug_device(q);
+                       queue_ctx_unlock_queue(q, ctx);
+               }
                break;
 
        default:
@@ -736,10 +750,6 @@ void elv_insert(struct blk_queue_ctx *ctx, struct request *rq, int where)
                       __func__, where);
                BUG();
        }
-
-
-       if (unplug_it && blk_queue_plugged(q) && !queue_in_flight(q))
-               __generic_unplug_device(q);
 }
 
 void __elv_add_request(struct blk_queue_ctx *ctx, struct request *rq, int where,
@@ -747,6 +757,8 @@ void __elv_add_request(struct blk_queue_ctx *ctx, struct request *rq, int where,
 {
        struct request_queue *q = ctx->queue;
 
+       assert_spin_locked(&ctx->lock);
+
        if (rq->cmd_flags & REQ_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (rq->cmd_type == REQ_TYPE_FS ||
@@ -758,8 +770,11 @@ void __elv_add_request(struct blk_queue_ctx *ctx, struct request *rq, int where,
                    where == ELEVATOR_INSERT_SORT)
                where = ELEVATOR_INSERT_BACK;
 
-       if (plug)
+       if (plug) {
+               queue_ctx_lock_queue(q, ctx);
                blk_plug_device(q);
+               queue_ctx_unlock_queue(q, ctx);
+       }
 
        elv_insert(ctx, rq, where);
 }
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 360f3173ea7f541e37ec57a9f46942f41de5bbd5..3dfa06eb61ee23a67008ea3cc13b6fd3a7652bc4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -75,4 +75,18 @@ static inline int queue_elvpriv(struct request_queue *q)
        return blk_ctx_sum(q, __ctx->rl.elvpriv);
 }
 
+static inline void queue_ctx_lock_queue(struct request_queue *q,
+                                       struct blk_queue_ctx *ctx)
+{
+       spin_unlock(&ctx->lock);
+       spin_lock(q->queue_lock);
+}
+
+static inline void queue_ctx_unlock_queue(struct request_queue *q,
+                                         struct blk_queue_ctx *ctx)
+{
+       spin_unlock(q->queue_lock);
+       spin_lock(&ctx->lock);
+}
+
 #endif