*/
void __blk_run_queue(struct request_queue *q)
{
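+ /* the queue is always run under the queue lock, never under a per-ctx lock alone */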
+ assert_spin_locked(q->queue_lock);
+
blk_remove_plug(q);
if (unlikely(blk_queue_stopped(q)))
__elv_add_request(ctx, req, where, 0);
out:
spin_unlock_irq(&ctx->lock);
- if (unplug || !queue_should_plug(q))
+ if (unplug || !queue_should_plug(q)) {
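+ /* unplugging acts on the shared queue; ctx->lock was dropped above, so take the queue lock */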
+ spin_lock_irq(q->queue_lock);
__generic_unplug_device(q);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
+ }
return 0;
}
rq->rq_disk = bd_disk;
rq->end_io = done;
WARN_ON(irqs_disabled());
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&ctx->lock);
__elv_add_request(ctx, rq, where, 1);
+ spin_unlock(&ctx->lock);
+
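+ /* hand off from ctx->lock to the queue lock for the unplug; irqs stay disabled until the final unlock */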
+ spin_lock(q->queue_lock);
__generic_unplug_device(q);
/* the queue is stopped so it won't be plugged+unplugged */
if (rq->cmd_type == REQ_TYPE_PM_RESUME)
case ELEVATOR_INSERT_FRONT:
rq->cmd_flags |= REQ_SOFTBARRIER;
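+ /* q->queue_head is protected by the queue lock, so trade ctx->lock for it */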
+ queue_ctx_lock_queue(q, ctx);
+
list_add(&rq->queuelist, &q->queue_head);
+ if (unplug_it && blk_queue_plugged(q) && !queue_in_flight(q))
+ __generic_unplug_device(q);
+
+ queue_ctx_unlock_queue(q, ctx);
break;
case ELEVATOR_INSERT_BACK:
* with anything. There's no point in delaying queue
* processing.
*/
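+ /* __blk_run_queue() requires the queue lock, so trade ctx->lock for it around the run */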
+ queue_ctx_lock_queue(q, ctx);
__blk_run_queue(q);
+ queue_ctx_unlock_queue(q, ctx);
break;
case ELEVATOR_INSERT_SORT:
* elevator_add_req_fn.
*/
ctx->queue->elevator->ops->elevator_add_req_fn(ctx, rq);
+
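+ /* unplug under the queue lock if the queue is plugged with nothing in flight */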
+ if (unplug_it && blk_queue_plugged(q) && !queue_in_flight(q)) {
+ queue_ctx_lock_queue(q, ctx);
+ __generic_unplug_device(q);
+ queue_ctx_unlock_queue(q, ctx);
+ }
break;
default:
__func__, where);
BUG();
}
-
-
- if (unplug_it && blk_queue_plugged(q) && !queue_in_flight(q))
- __generic_unplug_device(q);
}
void __elv_add_request(struct blk_queue_ctx *ctx, struct request *rq, int where,
	int plug)
{
struct request_queue *q = ctx->queue;
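+ /* callers must already hold the per-ctx lock */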
+ assert_spin_locked(&ctx->lock);
+
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
where == ELEVATOR_INSERT_SORT)
where = ELEVATOR_INSERT_BACK;
- if (plug)
+ if (plug) {
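+ /* blk_plug_device() changes queue state and needs the queue lock */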
+ queue_ctx_lock_queue(q, ctx);
blk_plug_device(q);
+ queue_ctx_unlock_queue(q, ctx);
+ }
elv_insert(ctx, rq, where);
}
return blk_ctx_sum(q, __ctx->rl.elvpriv);
}
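+/*
+ * Lock hand-off helpers: ctx->lock is dropped before the queue lock is
+ * taken (and vice versa), so the two locks are never held together.
+ */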
+static inline void queue_ctx_lock_queue(struct request_queue *q,
+ struct blk_queue_ctx *ctx)
+{
+ spin_unlock(&ctx->lock);
+ spin_lock(q->queue_lock);
+}
+
+static inline void queue_ctx_unlock_queue(struct request_queue *q,
+ struct blk_queue_ctx *ctx)
+{
+ spin_unlock(q->queue_lock);
+ spin_lock(&ctx->lock);
+}
+
#endif