author | Jens Axboe <jaxboe@fusionio.com> | 2010-09-22 21:03:39 +0200 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-09-22 21:03:39 +0200 |
commit | 38bb177765247024dad4b70a2abe0044d0574998 (patch) | |
tree | 2d0a80b5500ed0c9c2308ac5b48e64d18cbf7aed | |
parent | 35f2046ac858ca165a8aba477c9236e53a8dbffa (diff) | |
block: optimize rq allocation path for less queue locking (blk-alloc-optimize)
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
-rw-r--r-- | block/blk-core.c | 32 |
-rw-r--r-- | block/cfq-iosched.c | 29 |
2 files changed, 22 insertions, 39 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 91bc13dd276f..aa94ba3a6225 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -714,8 +714,6 @@ static void freed_request(struct request_queue *q, int sync, int priv)
 
 /*
  * Get a free request, queue_lock must be held.
- * Returns NULL on failure, with queue_lock held.
- * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
                                    struct bio *bio, gfp_t gfp_mask)
@@ -723,6 +721,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         struct request *rq = NULL;
         struct request_list *rl = &q->rq;
         const bool is_sync = rw_is_sync(rw_flags) != 0;
+        const bool drop_lock = (gfp_mask & __GFP_WAIT) != 0;
         int may_queue, priv;
 
         may_queue = elv_may_queue(q, rw_flags);
@@ -737,7 +736,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 
         if (blk_queue_io_stat(q))
                 rw_flags |= REQ_IO_STAT;
-        spin_unlock_irq(q->queue_lock);
+
+        if (drop_lock)
+                spin_unlock_irq(q->queue_lock);
 
         rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
         if (unlikely(!rq)) {
@@ -748,12 +749,17 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                  * Allocating task should really be put onto the front of the
                  * wait queue, but this is pretty rare.
                  */
-                spin_lock_irq(q->queue_lock);
+                if (drop_lock)
+                        spin_lock_irq(q->queue_lock);
+
                 freed_request(q, is_sync, priv);
                 goto out;
         }
 
         trace_block_getrq(q, bio, rw_flags & 1);
+
+        if (drop_lock)
+                spin_lock_irq(q->queue_lock);
 out:
         return rq;
 }
@@ -762,7 +768,7 @@ out:
  * No available requests for this queue, unplug the device and wait for some
  * requests to become available.
  *
- * Called with q->queue_lock held, and returns with it unlocked.
+ * Called with q->queue_lock held.
  */
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                                         struct bio *bio)
@@ -770,7 +776,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
         const bool is_sync = rw_is_sync(rw_flags) != 0;
         struct request *rq;
 
-        rq = get_request(q, rw_flags, bio, GFP_NOIO);
+        rq = get_request(q, rw_flags, bio, GFP_ATOMIC);
         while (!rq) {
                 DEFINE_WAIT(wait);
                 struct request_list *rl = &q->rq;
@@ -800,15 +806,13 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
         BUG_ON(rw != READ && rw != WRITE);
 
         spin_lock_irq(q->queue_lock);
-        if (gfp_mask & __GFP_WAIT) {
+
+        if (gfp_mask & __GFP_WAIT)
                 rq = get_request_wait(q, rw, NULL);
-        } else {
+        else
                 rq = get_request(q, rw, NULL, gfp_mask);
-                if (!rq)
-                        spin_unlock_irq(q->queue_lock);
-        }
-        /* q->queue_lock is unlocked at this point */
 
+        spin_unlock_irq(q->queue_lock);
         return rq;
 }
 EXPORT_SYMBOL(blk_get_request);
@@ -1200,8 +1204,7 @@ get_rq:
                 rw_flags |= REQ_SYNC;
 
         /*
-         * Grab a free request. This is might sleep but can not fail.
-         * Returns with the queue unlocked.
+         * Grab a free request.
          */
         req = get_request_wait(q, rw_flags, bio);
 
@@ -1213,7 +1216,6 @@ get_rq:
          */
         init_request_from_bio(req, bio);
 
-        spin_lock_irq(q->queue_lock);
         if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
             bio_flagged(bio, BIO_CPU_AFFINE))
                 req->cpu = blk_cpu_to_group(smp_processor_id());
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f65c6f01c475..3d8635d4cb86 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2543,12 +2543,12 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
                 cfq_put_cfqg(orig_cfqg);
 }
 
+typedef void (cic_call_fn)(struct io_context *, struct cfq_io_context *);
+
 /*
  * Must always be called with the rcu_read_lock() held
  */
-static void
-__call_for_each_cic(struct io_context *ioc,
-                    void (*func)(struct io_context *, struct cfq_io_context *))
+static void __call_for_each_cic(struct io_context *ioc, cic_call_fn *func)
 {
         struct cfq_io_context *cic;
         struct hlist_node *n;
@@ -2560,9 +2560,7 @@ __call_for_each_cic(struct io_context *ioc,
 /*
  * Call func for each cic attached to this ioc.
  */
-static void
-call_for_each_cic(struct io_context *ioc,
-                  void (*func)(struct io_context *, struct cfq_io_context *))
+static void call_for_each_cic(struct io_context *ioc, cic_call_fn *func)
 {
         rcu_read_lock();
         __call_for_each_cic(ioc, func);
@@ -2787,13 +2785,10 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
 {
         struct cfq_data *cfqd = cic_to_cfqd(cic);
         struct cfq_queue *cfqq;
-        unsigned long flags;
 
         if (unlikely(!cfqd))
                 return;
 
-        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
         cfqq = cic->cfqq[BLK_RW_ASYNC];
         if (cfqq) {
                 struct cfq_queue *new_cfqq;
@@ -2808,8 +2803,6 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
         cfqq = cic->cfqq[BLK_RW_SYNC];
         if (cfqq)
                 cfq_mark_cfqq_prio_changed(cfqq);
-
-        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
 
 static void cfq_ioc_set_ioprio(struct io_context *ioc)
@@ -3057,11 +3050,8 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 
                 radix_tree_preload_end();
 
-                if (!ret) {
-                        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+                if (!ret)
                         list_add(&cic->queue_list, &cfqd->cic_list);
-                        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
-                }
         }
 
         if (ret)
@@ -3081,8 +3071,6 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
         struct io_context *ioc = NULL;
         struct cfq_io_context *cic;
 
-        might_sleep_if(gfp_mask & __GFP_WAIT);
-
         ioc = get_io_context(gfp_mask, cfqd->queue->node);
         if (!ioc)
                 return NULL;
@@ -3633,14 +3621,10 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
         const int rw = rq_data_dir(rq);
         const bool is_sync = rq_is_sync(rq);
         struct cfq_queue *cfqq;
-        unsigned long flags;
 
         might_sleep_if(gfp_mask & __GFP_WAIT);
 
         cic = cfq_get_io_context(cfqd, gfp_mask);
-
-        spin_lock_irqsave(q->queue_lock, flags);
-
         if (!cic)
                 goto queue_fail;
 
@@ -3673,8 +3657,6 @@ new_queue:
         cfqq->allocated[rw]++;
         atomic_inc(&cfqq->ref);
 
-        spin_unlock_irqrestore(q->queue_lock, flags);
-
         rq->elevator_private = cic;
         rq->elevator_private2 = cfqq;
         rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
@@ -3685,7 +3667,6 @@ queue_fail:
                 put_io_context(cic->ioc);
 
         cfq_schedule_dispatch(cfqd);
-        spin_unlock_irqrestore(q->queue_lock, flags);
         cfq_log(cfqd, "set_request fail");
         return 1;
 }
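The blk-core.c hunks change the locking contract of get_request(): it is now entered and exited with q->queue_lock held, and the lock is dropped around blk_alloc_request() only when the gfp_mask allows sleeping (the new drop_lock flag, set from __GFP_WAIT). That is also why get_request_wait() switches its first attempt from GFP_NOIO to GFP_ATOMIC, and why blk_get_request() and the make_request path no longer re-take the lock afterwards. Below is a minimal userspace sketch of that conditional lock-drop pattern, assuming a pthread mutex in place of the queue spinlock; the names pool_lock, get_request_sketch and may_block are illustrative and not kernel APIs.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Stand-in for q->queue_lock; the real code uses a spinlock with IRQs off. */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct request { int tag; };

/*
 * Called with pool_lock held; returns with pool_lock still held.
 * The lock is dropped across the allocation only when the caller may
 * block, mirroring the __GFP_WAIT test that sets drop_lock in the patch.
 */
static struct request *get_request_sketch(bool may_block)
{
        struct request *rq;

        if (may_block)
                pthread_mutex_unlock(&pool_lock);

        /* Stand-in for blk_alloc_request(); a blocking allocation must not
         * run under the (spin)lock, which is the only reason to drop it. */
        rq = malloc(sizeof(*rq));

        if (may_block)
                pthread_mutex_lock(&pool_lock);

        return rq;      /* lock state is the same as on entry either way */
}

int main(void)
{
        pthread_mutex_lock(&pool_lock);
        struct request *rq = get_request_sketch(true);
        pthread_mutex_unlock(&pool_lock);

        free(rq);
        return 0;
}
```

Non-blocking callers thus skip the unlock/lock round trip entirely, while blocking callers still release the lock for the duration of the allocation.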
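The first two cfq-iosched.c hunks are a small cleanup riding along: the callback signature shared by __call_for_each_cic() and call_for_each_cic() is factored into the cic_call_fn typedef so each prototype fits on one line. A compile-able sketch of the same pattern follows; report() and call_for_each_cic_sketch() are hypothetical stand-ins, not the kernel functions.

```c
#include <stdio.h>

struct io_context;              /* opaque here; only the pointer types matter */
struct cfq_io_context;

/* One name for the callback signature instead of spelling it out per prototype. */
typedef void (cic_call_fn)(struct io_context *, struct cfq_io_context *);

/* Hypothetical callback, standing in for something like changed_ioprio(). */
static void report(struct io_context *ioc, struct cfq_io_context *cic)
{
        printf("cic callback invoked: ioc=%p cic=%p\n", (void *)ioc, (void *)cic);
}

/* The real helper walks ioc's cic list under rcu_read_lock(); this sketch
 * just invokes the callback once to show the typedef used as a parameter. */
static void call_for_each_cic_sketch(struct io_context *ioc, cic_call_fn *func)
{
        func(ioc, NULL);
}

int main(void)
{
        call_for_each_cic_sketch(NULL, report);
        return 0;
}
```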