block: convert to pos and nr_sectors accessors
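
Convert the direct reads of rq->sector, rq->nr_sectors and
rq->current_nr_sectors in blk-core.c to the blk_rq_pos(),
blk_rq_sectors() and blk_rq_cur_sectors() accessors.  The hunks below
also route I/O statistics through blk_do_io_stat(), switch the request
list accounting to the BLK_RW_SYNC/BLK_RW_ASYNC indices, and fold the
old completion helpers into blk_update_request() and
blk_end_bidi_request().

As a before/after sketch of the conversion pattern (mirroring the
blk_rq_check_limits() hunk below):

	/* old: read the request fields directly */
	if (rq->nr_sectors > q->max_sectors)
		return -EIO;

	/* new: go through the accessor */
	if (blk_rq_sectors(rq) > q->max_sectors)
		return -EIO;
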
diff --git a/block/blk-core.c b/block/blk-core.c
index 996ed906d8ca518c62dba691ad7b579ee794e1a0..82dc20621c068b27d5af8e97e43427625f128f66 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,16 +64,15 @@ static struct workqueue_struct *kblockd_workqueue;
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
-       struct gendisk *disk = rq->rq_disk;
        struct hd_struct *part;
        int rw = rq_data_dir(rq);
        int cpu;
 
-       if (!blk_fs_request(rq) || !disk || !blk_do_io_stat(disk->queue))
+       if (!blk_do_io_stat(rq))
                return;
 
        cpu = part_stat_lock();
-       part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+       part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
        if (!new_io)
                part_stat_inc(cpu, part, merges[rw]);
@@ -132,8 +131,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->cmd = rq->__cmd;
+       rq->cmd_len = BLK_MAX_CDB;
        rq->tag = -1;
        rq->ref_count = 1;
+       rq->start_time = jiffies;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -184,14 +185,12 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                rq->cmd_flags);
 
-       printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
-                                               (unsigned long long)rq->sector,
-                                               rq->nr_sectors,
-                                               rq->current_nr_sectors);
-       printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+       printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
+              (unsigned long long)blk_rq_pos(rq),
+              blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
+       printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
                                                rq->bio, rq->biotail,
-                                               rq->buffer, rq->data,
-                                               rq->data_len);
+                                               rq->buffer, rq->data_len);
 
        if (blk_pc_request(rq)) {
                printk(KERN_INFO "  cdb: ");
@@ -333,24 +332,6 @@ void blk_unplug(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_unplug);
 
-static void blk_invoke_request_fn(struct request_queue *q)
-{
-       if (unlikely(blk_queue_stopped(q)))
-               return;
-
-       /*
-        * one level of recursion is ok and is much faster than kicking
-        * the unplug handling
-        */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-               q->request_fn(q);
-               queue_flag_clear(QUEUE_FLAG_REENTER, q);
-       } else {
-               queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-               kblockd_schedule_work(q, &q->unplug_work);
-       }
-}
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
@@ -365,7 +346,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       blk_invoke_request_fn(q);
+       __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -425,12 +406,23 @@ void __blk_run_queue(struct request_queue *q)
 {
        blk_remove_plug(q);
 
+       if (unlikely(blk_queue_stopped(q)))
+               return;
+
+       if (elv_queue_empty(q))
+               return;
+
        /*
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!elv_queue_empty(q))
-               blk_invoke_request_fn(q);
+       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+               q->request_fn(q);
+               queue_flag_clear(QUEUE_FLAG_REENTER, q);
+       } else {
+               queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+               kblockd_schedule_work(q, &q->unplug_work);
+       }
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -440,9 +432,7 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Invoke request handling on this queue, if it has pending work to do.
- *    May be used to restart queueing when a request has completed. Also
- *    See @blk_start_queueing.
- *
+ *    May be used to restart queueing when a request has completed.
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -484,11 +474,11 @@ static int blk_init_free_list(struct request_queue *q)
 {
        struct request_list *rl = &q->rq;
 
-       rl->count[READ] = rl->count[WRITE] = 0;
-       rl->starved[READ] = rl->starved[WRITE] = 0;
+       rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
+       rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
        rl->elvpriv = 0;
-       init_waitqueue_head(&rl->wait[READ]);
-       init_waitqueue_head(&rl->wait[WRITE]);
+       init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
+       init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                mempool_free_slab, request_cachep, q->node);
@@ -643,7 +633,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
 {
        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -652,7 +642,7 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 
        blk_rq_init(q, rq);
 
-       rq->cmd_flags = rw | REQ_ALLOCED;
+       rq->cmd_flags = flags | REQ_ALLOCED;
 
        if (priv) {
                if (unlikely(elv_set_request(q, rq, gfp_mask))) {
@@ -699,18 +689,18 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
        ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int rw)
+static void __freed_request(struct request_queue *q, int sync)
 {
        struct request_list *rl = &q->rq;
 
-       if (rl->count[rw] < queue_congestion_off_threshold(q))
-               blk_clear_queue_congested(q, rw);
+       if (rl->count[sync] < queue_congestion_off_threshold(q))
+               blk_clear_queue_congested(q, sync);
 
-       if (rl->count[rw] + 1 <= q->nr_requests) {
-               if (waitqueue_active(&rl->wait[rw]))
-                       wake_up(&rl->wait[rw]);
+       if (rl->count[sync] + 1 <= q->nr_requests) {
+               if (waitqueue_active(&rl->wait[sync]))
+                       wake_up(&rl->wait[sync]);
 
-               blk_clear_queue_full(q, rw);
+               blk_clear_queue_full(q, sync);
        }
 }
 
@@ -718,18 +708,18 @@ static void __freed_request(struct request_queue *q, int rw)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int sync, int priv)
 {
        struct request_list *rl = &q->rq;
 
-       rl->count[rw]--;
+       rl->count[sync]--;
        if (priv)
                rl->elvpriv--;
 
-       __freed_request(q, rw);
+       __freed_request(q, sync);
 
-       if (unlikely(rl->starved[rw ^ 1]))
-               __freed_request(q, rw ^ 1);
+       if (unlikely(rl->starved[sync ^ 1]))
+               __freed_request(q, sync ^ 1);
 }
 
 /*
@@ -743,15 +733,15 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
        struct io_context *ioc = NULL;
-       const int rw = rw_flags & 0x01;
+       const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue, priv;
 
        may_queue = elv_may_queue(q, rw_flags);
        if (may_queue == ELV_MQUEUE_NO)
                goto rq_starved;
 
-       if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
-               if (rl->count[rw]+1 >= q->nr_requests) {
+       if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+               if (rl->count[is_sync]+1 >= q->nr_requests) {
                        ioc = current_io_context(GFP_ATOMIC, q->node);
                        /*
                         * The queue will fill after this allocation, so set
@@ -759,9 +749,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                         * This process will be allowed to complete a batch of
                         * requests, others will be blocked.
                         */
-                       if (!blk_queue_full(q, rw)) {
+                       if (!blk_queue_full(q, is_sync)) {
                                ioc_set_batching(q, ioc);
-                               blk_set_queue_full(q, rw);
+                               blk_set_queue_full(q, is_sync);
                        } else {
                                if (may_queue != ELV_MQUEUE_MUST
                                                && !ioc_batching(q, ioc)) {
@@ -774,7 +764,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                                }
                        }
                }
-               blk_set_queue_congested(q, rw);
+               blk_set_queue_congested(q, is_sync);
        }
 
        /*
@@ -782,16 +772,18 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         * limit of requests, otherwise we could have thousands of requests
         * allocated with any setting of ->nr_requests
         */
-       if (rl->count[rw] >= (3 * q->nr_requests / 2))
+       if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
                goto out;
 
-       rl->count[rw]++;
-       rl->starved[rw] = 0;
+       rl->count[is_sync]++;
+       rl->starved[is_sync] = 0;
 
        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (priv)
                rl->elvpriv++;
 
+       if (blk_queue_io_stat(q))
+               rw_flags |= REQ_IO_STAT;
        spin_unlock_irq(q->queue_lock);
 
        rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
@@ -804,7 +796,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                 * wait queue, but this is pretty rare.
                 */
                spin_lock_irq(q->queue_lock);
-               freed_request(q, rw, priv);
+               freed_request(q, is_sync, priv);
 
                /*
                 * in the very unlikely event that allocation failed and no
@@ -814,8 +806,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                 * rq mempool into READ and WRITE
                 */
 rq_starved:
-               if (unlikely(rl->count[rw] == 0))
-                       rl->starved[rw] = 1;
+               if (unlikely(rl->count[is_sync] == 0))
+                       rl->starved[is_sync] = 1;
 
                goto out;
        }
@@ -829,7 +821,7 @@ rq_starved:
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
 
-       trace_block_getrq(q, bio, rw);
+       trace_block_getrq(q, bio, rw_flags & 1);
 out:
        return rq;
 }
@@ -843,7 +835,7 @@ out:
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                                        struct bio *bio)
 {
-       const int rw = rw_flags & 0x01;
+       const bool is_sync = rw_is_sync(rw_flags) != 0;
        struct request *rq;
 
        rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -852,10 +844,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                struct io_context *ioc;
                struct request_list *rl = &q->rq;
 
-               prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+               prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                TASK_UNINTERRUPTIBLE);
 
-               trace_block_sleeprq(q, bio, rw);
+               trace_block_sleeprq(q, bio, rw_flags & 1);
 
                __generic_unplug_device(q);
                spin_unlock_irq(q->queue_lock);
@@ -871,7 +863,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                ioc_set_batching(q, ioc);
 
                spin_lock_irq(q->queue_lock);
-               finish_wait(&rl->wait[rw], &wait);
+               finish_wait(&rl->wait[is_sync], &wait);
 
                rq = get_request(q, rw_flags, bio, GFP_NOIO);
        };
@@ -899,28 +891,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_get_request);
 
-/**
- * blk_start_queueing - initiate dispatch of requests to device
- * @q:         request queue to kick into gear
- *
- * This is basically a helper to remove the need to know whether a queue
- * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue. Should be used to start queueing on a device outside
- * of ->request_fn() context. Also see @blk_run_queue.
- *
- * The queue lock must be held with interrupts disabled.
- */
-void blk_start_queueing(struct request_queue *q)
-{
-       if (!blk_queue_plugged(q)) {
-               if (unlikely(blk_queue_stopped(q)))
-                       return;
-               q->request_fn(q);
-       } else
-               __generic_unplug_device(q);
-}
-EXPORT_SYMBOL(blk_start_queueing);
-
 /**
  * blk_requeue_request - put a request back on queue
  * @q:         request queue where request should be inserted
@@ -975,7 +945,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
         * barrier
         */
        rq->cmd_type = REQ_TYPE_SPECIAL;
-       rq->cmd_flags |= REQ_SOFTBARRIER;
 
        rq->special = data;
 
@@ -989,7 +958,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
-       blk_start_queueing(q);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1070,14 +1039,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
         * it didn't come out of our reserved rq pools
         */
        if (req->cmd_flags & REQ_ALLOCED) {
-               int rw = rq_data_dir(req);
+               int is_sync = rq_is_sync(req) != 0;
                int priv = req->cmd_flags & REQ_ELVPRIV;
 
                BUG_ON(!list_empty(&req->queuelist));
                BUG_ON(!hlist_unhashed(&req->hash));
 
                blk_free_request(q, req);
-               freed_request(q, rw, priv);
+               freed_request(q, is_sync, priv);
        }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1111,31 +1080,36 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        if (bio_failfast_driver(bio))
                req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
-       /*
-        * REQ_BARRIER implies no merging, but lets make it explicit
-        */
        if (unlikely(bio_discard(bio))) {
                req->cmd_flags |= REQ_DISCARD;
                if (bio_barrier(bio))
                        req->cmd_flags |= REQ_SOFTBARRIER;
                req->q->prepare_discard_fn(req->q, req);
        } else if (unlikely(bio_barrier(bio)))
-               req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+               req->cmd_flags |= REQ_HARDBARRIER;
 
        if (bio_sync(bio))
                req->cmd_flags |= REQ_RW_SYNC;
-       if (bio_unplug(bio))
-               req->cmd_flags |= REQ_UNPLUG;
        if (bio_rw_meta(bio))
                req->cmd_flags |= REQ_RW_META;
+       if (bio_noidle(bio))
+               req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
        req->ioprio = bio_prio(bio);
-       req->start_time = jiffies;
        blk_rq_bio_prep(req->q, req, bio);
 }
 
+/*
+ * Only disable plugging for a non-rotational device if it also does
+ * tagging; otherwise we still need plugging for proper merging.
+ */
+static inline bool queue_should_plug(struct request_queue *q)
+{
+       return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
+}
+
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
@@ -1242,11 +1216,11 @@ get_rq:
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
            bio_flagged(bio, BIO_CPU_AFFINE))
                req->cpu = blk_cpu_to_group(smp_processor_id());
-       if (!blk_queue_nonrot(q) && elv_queue_empty(q))
+       if (queue_should_plug(q) && elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
-       if (unplug || blk_queue_nonrot(q))
+       if (unplug || !queue_should_plug(q))
                __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
        return 0;
@@ -1582,7 +1556,7 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-       if (rq->nr_sectors > q->max_sectors ||
+       if (blk_rq_sectors(rq) > q->max_sectors ||
            rq->data_len > q->max_hw_sectors << 9) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
@@ -1664,18 +1638,13 @@ EXPORT_SYMBOL(blkdev_dequeue_request);
 
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
-       struct gendisk *disk = req->rq_disk;
-
-       if (!disk || !blk_do_io_stat(disk->queue))
-               return;
-
-       if (blk_fs_request(req)) {
+       if (blk_do_io_stat(req)) {
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;
 
                cpu = part_stat_lock();
-               part = disk_map_sector_rcu(req->rq_disk, req->sector);
+               part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
                part_stat_unlock();
        }
@@ -1683,24 +1652,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 
 static void blk_account_io_done(struct request *req)
 {
-       struct gendisk *disk = req->rq_disk;
-
-       if (!disk || !blk_do_io_stat(disk->queue))
-               return;
-
        /*
         * Account IO completion.  bar_rq isn't accounted as a normal
         * IO on queueing nor completion.  Accounting the containing
         * request is enough.
         */
-       if (blk_fs_request(req) && req != &req->q->bar_rq) {
+       if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
                unsigned long duration = jiffies - req->start_time;
                const int rw = rq_data_dir(req);
                struct hd_struct *part;
                int cpu;
 
                cpu = part_stat_lock();
-               part = disk_map_sector_rcu(disk, req->sector);
+               part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
@@ -1712,38 +1676,176 @@ static void blk_account_io_done(struct request *req)
 }
 
 /**
- * __end_that_request_first - end I/O on a request
- * @req:      the request being processed
+ * blk_rq_bytes - Returns bytes left to complete in the entire request
+ * @rq: the request being processed
+ **/
+unsigned int blk_rq_bytes(struct request *rq)
+{
+       if (blk_fs_request(rq))
+               return blk_rq_sectors(rq) << 9;
+
+       return rq->data_len;
+}
+EXPORT_SYMBOL_GPL(blk_rq_bytes);
+
+/**
+ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ * @rq: the request being processed
+ **/
+unsigned int blk_rq_cur_bytes(struct request *rq)
+{
+       if (blk_fs_request(rq))
+               return rq->current_nr_sectors << 9;
+
+       if (rq->bio)
+               return rq->bio->bi_size;
+
+       return rq->data_len;
+}
+EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
+
+struct request *elv_next_request(struct request_queue *q)
+{
+       struct request *rq;
+       int ret;
+
+       while ((rq = __elv_next_request(q)) != NULL) {
+               if (!(rq->cmd_flags & REQ_STARTED)) {
+                       /*
+                        * This is the first time the device driver
+                        * sees this request (possibly after
+                        * requeueing).  Notify IO scheduler.
+                        */
+                       if (blk_sorted_rq(rq))
+                               elv_activate_rq(q, rq);
+
+                       /*
+                        * just mark the request as started even if we
+                        * don't start it; a request that has been delayed
+                        * should not be passed by new incoming requests
+                        */
+                       rq->cmd_flags |= REQ_STARTED;
+                       trace_block_rq_issue(q, rq);
+               }
+
+               if (!q->boundary_rq || q->boundary_rq == rq) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = NULL;
+               }
+
+               if (rq->cmd_flags & REQ_DONTPREP)
+                       break;
+
+               if (q->dma_drain_size && rq->data_len) {
+                       /*
+                        * make sure space for the drain appears; we
+                        * know we can do this because max_hw_segments
+                        * has been adjusted to be one fewer than the
+                        * device can handle
+                        */
+                       rq->nr_phys_segments++;
+               }
+
+               if (!q->prep_rq_fn)
+                       break;
+
+               ret = q->prep_rq_fn(q, rq);
+               if (ret == BLKPREP_OK) {
+                       break;
+               } else if (ret == BLKPREP_DEFER) {
+                       /*
+                        * the request may have been (partially) prepped.
+                        * we need to keep this request in the front to
+                        * avoid resource deadlock.  REQ_STARTED will
+                        * prevent other fs requests from passing this one.
+                        */
+                       if (q->dma_drain_size && rq->data_len &&
+                           !(rq->cmd_flags & REQ_DONTPREP)) {
+                               /*
+                                * remove the space for the drain we added
+                                * so that we don't add it again
+                                */
+                               --rq->nr_phys_segments;
+                       }
+
+                       rq = NULL;
+                       break;
+               } else if (ret == BLKPREP_KILL) {
+                       rq->cmd_flags |= REQ_QUIET;
+                       __blk_end_request_all(rq, -EIO);
+               } else {
+                       printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+                       break;
+               }
+       }
+
+       return rq;
+}
+EXPORT_SYMBOL(elv_next_request);
+
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
+{
+       BUG_ON(list_empty(&rq->queuelist));
+       BUG_ON(ELV_ON_HASH(rq));
+
+       list_del_init(&rq->queuelist);
+
+       /*
+        * the time frame between a request being removed from the lists
+        * and it being freed is accounted as io that is in progress at
+        * the driver side.
+        */
+       if (blk_account_rq(rq))
+               q->in_flight++;
+}
+
+/**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq:              the request being processed
  * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * @nr_bytes: number of bytes to complete @rq
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
+ *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ *     the request structure even if @rq doesn't have leftover.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ *     This special helper function is only for request stacking drivers
+ *     (e.g. request-based dm) so that they can handle partial completion.
+ *     Actual device drivers should use blk_end_request instead.
+ *
+ *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ *     %false return from this function.
  *
  * Return:
- *     %0 - we are done with this request, call end_that_request_last()
- *     %1 - still buffers pending for this request
+ *     %false - this request doesn't have any more data
+ *     %true  - this request has more data
  **/
-static int __end_that_request_first(struct request *req, int error,
-                                   int nr_bytes)
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 {
        int total_bytes, bio_nbytes, next_idx = 0;
        struct bio *bio;
 
+       if (!req->bio)
+               return false;
+
        trace_block_rq_complete(req->q, req);
 
        /*
-        * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
-        * sense key with us all the way through
+        * For fs requests, rq is just a carrier of independent bios
+        * and each partial completion should be handled separately.
+        * Reset per-request error on each partial completion.
+        *
+        * TODO: tj: This is too subtle.  It would be better to let
+        * low level drivers do what they see fit.
         */
-       if (!blk_pc_request(req))
+       if (blk_fs_request(req))
                req->errors = 0;
 
        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                req->rq_disk ? req->rq_disk->disk_name : "?",
-                               (unsigned long long)req->sector);
+                               (unsigned long long)blk_rq_pos(req));
        }
 
        blk_account_io_completion(req, nr_bytes);
@@ -1803,8 +1905,16 @@ static int __end_that_request_first(struct request *req, int error,
        /*
         * completely done
         */
-       if (!req->bio)
-               return 0;
+       if (!req->bio) {
+               /*
+                * Reset counters so that the request stacking driver
+                * can find how many bytes remain in the request
+                * later.
+                */
+               req->nr_sectors = req->hard_nr_sectors = 0;
+               req->current_nr_sectors = req->hard_cur_sectors = 0;
+               return false;
+       }
 
        /*
         * if the request wasn't completed, update state
@@ -1818,13 +1928,31 @@ static int __end_that_request_first(struct request *req, int error,
 
        blk_recalc_rq_sectors(req, total_bytes >> 9);
        blk_recalc_rq_segments(req);
-       return 1;
+       return true;
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
+
+static bool blk_update_bidi_request(struct request *rq, int error,
+                                   unsigned int nr_bytes,
+                                   unsigned int bidi_bytes)
+{
+       if (blk_update_request(rq, error, nr_bytes))
+               return true;
+
+       /* Bidi request must be completed as a whole */
+       if (unlikely(blk_bidi_rq(rq)) &&
+           blk_update_request(rq->next_rq, error, bidi_bytes))
+               return true;
+
+       add_disk_randomness(rq->rq_disk);
+
+       return false;
 }
 
 /*
  * queue lock must be held
  */
-static void end_that_request_last(struct request *req, int error)
+static void blk_finish_request(struct request *req, int error)
 {
        if (blk_rq_tagged(req))
                blk_queue_end_tag(req->q, req);
@@ -1850,248 +1978,65 @@ static void end_that_request_last(struct request *req, int error)
 }
 
 /**
- * blk_rq_bytes - Returns bytes left to complete in the entire request
- * @rq: the request being processed
- **/
-unsigned int blk_rq_bytes(struct request *rq)
-{
-       if (blk_fs_request(rq))
-               return rq->hard_nr_sectors << 9;
-
-       return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_bytes);
-
-/**
- * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
- * @rq: the request being processed
- **/
-unsigned int blk_rq_cur_bytes(struct request *rq)
-{
-       if (blk_fs_request(rq))
-               return rq->current_nr_sectors << 9;
-
-       if (rq->bio)
-               return rq->bio->bi_size;
-
-       return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
-/**
- * end_request - end I/O on the current segment of the request
- * @req:       the request being processed
- * @uptodate:  error value or %0/%1 uptodate flag
- *
- * Description:
- *     Ends I/O on the current segment of a request. If that is the only
- *     remaining segment, the request is also completed and freed.
- *
- *     This is a remnant of how older block drivers handled I/O completions.
- *     Modern drivers typically end I/O on the full request in one go, unless
- *     they have a residual value to account for. For that case this function
- *     isn't really useful, unless the residual just happens to be the
- *     full current segment. In other words, don't use this function in new
- *     code. Use blk_end_request() or __blk_end_request() to end a request.
- **/
-void end_request(struct request *req, int uptodate)
-{
-       int error = 0;
-
-       if (uptodate <= 0)
-               error = uptodate ? uptodate : -EIO;
-
-       __blk_end_request(req, error, req->hard_cur_sectors << 9);
-}
-EXPORT_SYMBOL(end_request);
-
-static int end_that_request_data(struct request *rq, int error,
-                                unsigned int nr_bytes, unsigned int bidi_bytes)
-{
-       if (rq->bio) {
-               if (__end_that_request_first(rq, error, nr_bytes))
-                       return 1;
-
-               /* Bidi request must be completed as a whole */
-               if (blk_bidi_rq(rq) &&
-                   __end_that_request_first(rq->next_rq, error, bidi_bytes))
-                       return 1;
-       }
-
-       return 0;
-}
-
-/**
- * blk_end_io - Generic end_io function to complete a request.
- * @rq:           the request being processed
- * @error:        %0 for success, < %0 for error
- * @nr_bytes:     number of bytes to complete @rq
- * @bidi_bytes:   number of bytes to complete @rq->next_rq
- * @drv_callback: function called between completion of bios in the request
- *                and completion of the request.
- *                If the callback returns non %0, this helper returns without
- *                completion of the request.
+ * blk_end_bidi_request - Complete a bidi request
+ * @rq:         the request to complete
+ * @error:      %0 for success, < %0 for error
+ * @nr_bytes:   number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
  * Description:
  *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- *     If @rq has leftover, sets it up for the next range of segments.
+ *     Drivers that support bidi can safely call this function for any
+ *     type of request, bidi or uni.  In the latter case @bidi_bytes is
+ *     just ignored.
  *
  * Return:
- *     %0 - we are done with this request
- *     %1 - this request is not freed yet, it still has pending buffers.
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
  **/
-static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
-                     unsigned int bidi_bytes,
-                     int (drv_callback)(struct request *))
+bool blk_end_bidi_request(struct request *rq, int error,
+                         unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        struct request_queue *q = rq->q;
-       unsigned long flags = 0UL;
-
-       if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
-               return 1;
+       unsigned long flags;
 
-       /* Special feature for tricky drivers */
-       if (drv_callback && drv_callback(rq))
-               return 1;
-
-       add_disk_randomness(rq->rq_disk);
+       if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+               return true;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       end_that_request_last(rq, error);
+       blk_finish_request(rq, error);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
-       return 0;
-}
-
-/**
- * blk_end_request - Helper function for drivers to complete the request.
- * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @rq.
- *     If @rq has leftover, sets it up for the next range of segments.
- *
- * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
- **/
-int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
-{
-       return blk_end_io(rq, error, nr_bytes, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(blk_end_request);
-
-/**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
- **/
-int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
-{
-       if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
-               return 1;
-
-       add_disk_randomness(rq->rq_disk);
-
-       end_that_request_last(rq, error);
-
-       return 0;
+       return false;
 }
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
 
 /**
- * blk_end_bidi_request - Helper function for drivers to complete bidi request.
- * @rq:         the bidi request being processed
+ * __blk_end_bidi_request - Complete a bidi request with queue lock held
+ * @rq:         the request to complete
  * @error:      %0 for success, < %0 for error
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ *     Identical to blk_end_bidi_request() except that queue lock is
+ *     assumed to be locked on entry and remains so on return.
  *
  * Return:
- *     %0 - we are done with this request
- *     %1 - still buffers pending for this request
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
  **/
-int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
-                        unsigned int bidi_bytes)
+bool __blk_end_bidi_request(struct request *rq, int error,
+                           unsigned int nr_bytes, unsigned int bidi_bytes)
 {
-       return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
-}
-EXPORT_SYMBOL_GPL(blk_end_bidi_request);
+       if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+               return true;
 
-/**
- * blk_update_request - Special helper function for request stacking drivers
- * @rq:           the request being processed
- * @error:        %0 for success, < %0 for error
- * @nr_bytes:     number of bytes to complete @rq
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
- *     the request structure even if @rq doesn't have leftover.
- *     If @rq has leftover, sets it up for the next range of segments.
- *
- *     This special helper function is only for request stacking drivers
- *     (e.g. request-based dm) so that they can handle partial completion.
- *     Actual device drivers should use blk_end_request instead.
- */
-void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
-{
-       if (!end_that_request_data(rq, error, nr_bytes, 0)) {
-               /*
-                * These members are not updated in end_that_request_data()
-                * when all bios are completed.
-                * Update them so that the request stacking driver can find
-                * how many bytes remain in the request later.
-                */
-               rq->nr_sectors = rq->hard_nr_sectors = 0;
-               rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-       }
-}
-EXPORT_SYMBOL_GPL(blk_update_request);
+       blk_finish_request(rq, error);
 
-/**
- * blk_end_request_callback - Special helper function for tricky drivers
- * @rq:           the request being processed
- * @error:        %0 for success, < %0 for error
- * @nr_bytes:     number of bytes to complete
- * @drv_callback: function called between completion of bios in the request
- *                and completion of the request.
- *                If the callback returns non %0, this helper returns without
- *                completion of the request.
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @rq.
- *     If @rq has leftover, sets it up for the next range of segments.
- *
- *     This special helper function is used only for existing tricky drivers.
- *     (e.g. cdrom_newpc_intr() of ide-cd)
- *     This interface will be removed when such drivers are rewritten.
- *     Don't use this interface in other places anymore.
- *
- * Return:
- *     %0 - we are done with this request
- *     %1 - this request is not freed yet.
- *          this request still has pending buffers or
- *          the driver doesn't want to finish this request yet.
- **/
-int blk_end_request_callback(struct request *rq, int error,
-                            unsigned int nr_bytes,
-                            int (drv_callback)(struct request *))
-{
-       return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
+       return false;
 }
-EXPORT_SYMBOL_GPL(blk_end_request_callback);
+EXPORT_SYMBOL_GPL(__blk_end_bidi_request);
 
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
@@ -2151,6 +2096,9 @@ EXPORT_SYMBOL(kblockd_schedule_work);
 
 int __init blk_dev_init(void)
 {
+       BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+                       sizeof(((struct request *)0)->cmd_flags));
+
        kblockd_workqueue = create_workqueue("kblockd");
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
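
Usage sketch (not part of the diff above): with the consolidated
completion path, a request stacking driver can update a request without
finishing it via blk_update_request() and, once nothing is left, finish
it under the queue lock with __blk_end_bidi_request().  The error and
byte values below are placeholders:

	/*
	 * Sketch only: two-step completion as a request stacking
	 * driver would do it.  The queue lock is assumed to be held
	 * around __blk_end_bidi_request(), as its kerneldoc requires.
	 */
	if (blk_update_request(rq, error, nr_bytes)) {
		/* partial completion: rq is set up for the remaining
		 * segments and must not be finished yet */
		return;
	}

	/* all bytes done: finish and free the request (0 bidi bytes
	 * for a uni-directional request) */
	__blk_end_bidi_request(rq, error, 0, 0);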