rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->bio = rq->biotail = NULL;
+ rq->ioprio = 0;
rq->buffer = NULL;
rq->ref_count = 1;
rq->q = q;
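
A fresh request starts at priority zero, i.e. IOPRIO_CLASS_NONE, and only picks up a real priority from the bio in __make_request() below. For orientation, the ioprio encoding this series introduces (include/linux/ioprio.h; reproduced here as a sketch, not part of this hunk) packs a 3-bit scheduling class above class-specific data:

	#define IOPRIO_BITS		(16)
	#define IOPRIO_CLASS_SHIFT	(13)
	#define IOPRIO_PRIO_MASK	((1UL << IOPRIO_CLASS_SHIFT) - 1)

	#define IOPRIO_PRIO_CLASS(mask)	((mask) >> IOPRIO_CLASS_SHIFT)
	#define IOPRIO_PRIO_DATA(mask)	((mask) & IOPRIO_PRIO_MASK)
	#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | data)

	enum {
		IOPRIO_CLASS_NONE,	/* unset; schedulers treat as best-effort */
		IOPRIO_CLASS_RT,	/* realtime: served first */
		IOPRIO_CLASS_BE,	/* best-effort: the default class */
		IOPRIO_CLASS_IDLE,	/* served only when the disk is idle */
	};
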
if (!blk_remove_plug(q))
return;
- /*
- * was plugged, fire request_fn if queue has stuff to do
- */
- if (elv_next_request(q))
- q->request_fn(q);
+ q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);
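
The elv_next_request() test is gone: a scheduler that holds requests back (as the time-sliced CFQ this series builds does) can legitimately return NULL there and still need the kick, so once the plug comes off the driver is always called. Roughly the resulting function (earlier checks elided):

	void __generic_unplug_device(request_queue_t *q)
	{
		/* not plugged: nothing was deferred, nothing to do */
		if (!blk_remove_plug(q))
			return;

		/* was plugged: kick the driver unconditionally and let
		 * the elevator decide what, if anything, to dispatch */
		q->request_fn(q);
	}
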
mempool_free(rq, q->rq.rq_pool);
}
-static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
- int gfp_mask)
+static inline struct request *
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
*/
rq->flags = rw;
- if (!elv_set_request(q, rq, gfp_mask))
+ if (!elv_set_request(q, rq, bio, gfp_mask))
return rq;
mempool_free(rq, q->rq.rq_pool);
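
The point of threading the bio down here is that elv_set_request() runs before the request is queued, so the elevator can classify it by the submitting bio's priority at allocation time. A hypothetical hook sketching the shape; foo_find_queue() is a made-up stand-in for whatever per-priority bookkeeping a real scheduler keeps:

	static struct foo_queue *foo_find_queue(request_queue_t *q,
						unsigned short prio, int gfp_mask);

	static int foo_set_request(request_queue_t *q, struct request *rq,
				   struct bio *bio, int gfp_mask)
	{
		/* bio may be NULL; see blk_get_request() below */
		unsigned short prio = bio ? bio_prio(bio) : 0;

		rq->elevator_private = foo_find_queue(q, prio, gfp_mask);

		/* 0 means success, matching the !elv_set_request() test above */
		return rq->elevator_private == NULL;
	}
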
/*
* Get a free request, queue_lock must not be held
*/
-static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
+ int gfp_mask)
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
}
}
- switch (elv_may_queue(q, rw)) {
+ switch (elv_may_queue(q, rw, bio)) {
case ELV_MQUEUE_NO:
goto rq_starved;
case ELV_MQUEUE_MAY:
set_queue_congested(q, rw);
spin_unlock_irq(q->queue_lock);
- rq = blk_alloc_request(q, rw, gfp_mask);
+ rq = blk_alloc_request(q, rw, bio, gfp_mask);
if (!rq) {
/*
* Allocation failed presumably due to memory. Undo anything
 * No available requests for this queue; unplug the device and wait for some
* requests to become available.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw)
+static struct request *get_request_wait(request_queue_t *q, int rw,
+ struct bio *bio)
{
DEFINE_WAIT(wait);
struct request *rq;
prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE);
- rq = get_request(q, rw, GFP_NOIO);
+ rq = get_request(q, rw, bio, GFP_NOIO);
if (!rq) {
struct io_context *ioc;
BUG_ON(rw != READ && rw != WRITE);
if (gfp_mask & __GFP_WAIT)
- rq = get_request_wait(q, rw);
+ rq = get_request_wait(q, rw, NULL);
else
- rq = get_request(q, rw, gfp_mask);
+ rq = get_request(q, rw, NULL, gfp_mask);
return rq;
}
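
Callers that allocate a bare request have no bio to classify by, so blk_get_request() hands down NULL and the allocation path above has to tolerate it. E.g. a driver doing:

	/* no bio context exists yet: the elevator sees bio == NULL */
	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
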
return;
req->rq_status = RQ_INACTIVE;
- req->q = NULL;
req->rl = NULL;
/*
req->rq_disk->in_flight--;
}
+ req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+
__blk_put_request(q, next);
return 1;
}
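
On a merge the surviving request takes the better of the two priorities, so absorbing a low-priority neighbour never demotes a high-priority request. ioprio_best() (added in fs/ioprio.c by this series) behaves roughly like:

	int ioprio_best(unsigned short aprio, unsigned short bprio)
	{
		unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
		unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

		/* an unset priority never wins */
		if (!ioprio_valid(aprio))
			return bprio;
		if (!ioprio_valid(bprio))
			return aprio;

		/* same class: the lower level is the more important one */
		if (aclass == bclass)
			return min(aprio, bprio);

		/* otherwise the stronger class wins: RT < BE < IDLE */
		return aclass < bclass ? aprio : bprio;
	}
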
{
struct request *req, *freereq = NULL;
int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
+ unsigned short prio;
sector_t sector;
sector = bio->bi_sector;
nr_sectors = bio_sectors(bio);
cur_nr_sectors = bio_cur_sectors(bio);
+ prio = bio_prio(bio);
rw = bio_data_dir(bio);
sync = bio_sync(bio);
req->biotail->bi_next = bio;
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req);
req->hard_cur_sectors = cur_nr_sectors;
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req);
freereq = NULL;
} else {
spin_unlock_irq(q->queue_lock);
- if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
+ if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
/*
* READA bit set
*/
if (bio_rw_ahead(bio))
goto end_io;
- freereq = get_request_wait(q, rw);
+ freereq = get_request_wait(q, rw, bio);
}
goto again;
}
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
req->bio = req->biotail = bio;
+ req->ioprio = prio;
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
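
bio_prio() reads the priority that bio_set_prio() stashed in the topmost IOPRIO_BITS of bio->bi_rw; the low bits keep the direction and flags they always held. Roughly as the series defines them in bio.h:

	#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
	#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
	#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

	#define bio_set_prio(bio, prio)		do {				\
		(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
		(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
	} while (0)
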
if (bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- switch (bio->bi_rw) {
+ switch (bio_data_dir(bio)) {
case READ:
p->read_sectors += bio_sectors(bio);
p->reads++;
{
struct request_list *rl = &q->rq;
struct request *rq;
+ int requeued = 0;
spin_lock_irq(q->queue_lock);
clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
rq = list_entry_rq(q->drain_list.next);
list_del_init(&rq->queuelist);
- __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+ elv_requeue_request(q, rq);
+ requeued++;
}
+ if (requeued)
+ q->request_fn(q);
+
spin_unlock_irq(q->queue_lock);
wake_up(&rl->wait[0]);
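
Drained requests now go back through elv_requeue_request() rather than being re-inserted at the back, presumably so the scheduler treats them as requeues instead of fresh submissions, and the driver gets one kick at the end instead of one per request. The resulting loop, pieced together from the hunk above:

	while (!list_empty(&q->drain_list)) {
		struct request *rq = list_entry_rq(q->drain_list.next);

		list_del_init(&rq->queuelist);
		elv_requeue_request(q, rq);
		requeued++;
	}

	if (requeued)
		q->request_fn(q);
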
BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec);
- bio->bi_rw = rw;
+ bio->bi_rw |= rw;
if (rw & WRITE)
mod_page_state(pgpgout, count);
else
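
Both of the preceding hunks follow from the bi_rw layout: submit_bio() must OR the direction in rather than assign, or it would wipe a priority already placed by bio_set_prio(), and blk_partition_remap() can no longer switch on the whole bi_rw word, so it masks out just the direction bit:

	/* from bio.h: the direction is bit 0 of bi_rw, whatever else is set */
	#define bio_data_dir(bio)	((bio)->bi_rw & 1)
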
struct io_context *ioc;
local_irq_save(flags);
+ task_lock(current);
ioc = current->io_context;
current->io_context = NULL;
+ ioc->task = NULL;
+ task_unlock(current);
local_irq_restore(flags);
if (ioc->aic && ioc->aic->exit)
ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
if (ret) {
atomic_set(&ret->refcount, 1);
- ret->pid = tsk->pid;
+ ret->task = current;
+ ret->set_ioprio = NULL;
ret->last_waited = jiffies; /* doesn't matter... */
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
ret->cic = NULL;
- spin_lock_init(&ret->lock);
local_irq_save(flags);
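
The io_context now carries a back-pointer to its task plus a set_ioprio() hook in place of the stored pid, and exit_io_context() clears ->task under task_lock() so anyone resolving a task from its io_context sees either a live task or NULL, never a stale pointer. An abbreviated sketch of the resulting struct (blkdev.h):

	struct io_context {
		atomic_t refcount;
		struct task_struct *task;	/* NULL once the task has exited */

		/* invoked on ioprio_set() so a scheduler can re-classify
		 * any queues it already built for this context */
		int (*set_ioprio)(struct io_context *, unsigned int);

		unsigned long last_waited;	/* request batching state */
		int nr_batch_requests;

		struct as_io_context *aic;
		struct cfq_io_context *cic;
	};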