block: implement and use [__]blk_end_request_all()
author		Tejun Heo <tj@kernel.org>
		Thu, 23 Apr 2009 02:05:19 +0000 (11:05 +0900)
committer	Jens Axboe <jens.axboe@oracle.com>
		Tue, 28 Apr 2009 05:37:35 +0000 (07:37 +0200)
There are many [__]blk_end_request() call sites which call it with
the full request length and expect full completion.  Many of them
ensure that the request actually completes by calling BUG_ON() on the
return value, which is awkward and error-prone.

This patch adds [__]blk_end_request_all(), which takes @rq and @error
and fully completes the request.  A BUG_ON() is added to ensure that
this actually happens.
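
As a rough, driver-agnostic illustration (the example_finish_request()
helper below is purely hypothetical and not part of this patch), a
typical conversion ends up looking like this:

	#include <linux/blkdev.h>

	/* Hypothetical call site, for illustration only. */
	static void example_finish_request(struct request_queue *q,
					   struct request *rq, int error)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		/*
		 * Old, awkward form removed by this patch:
		 *
		 *	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
		 *		BUG();
		 *
		 * New form: completes the full request and triggers
		 * BUG() internally if any part of it is still pending.
		 * The queue lock must be held, as for
		 * __blk_end_request(); blk_end_request_all() is the
		 * variant that does not require the caller to hold it.
		 */
		__blk_end_request_all(rq, error);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}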

Most conversions are simple but there are a few noteworthy ones.

* cdrom/viocd: viocd_end_request() replaced with direct calls to
  __blk_end_request_all().

* s390/block/dasd: dasd_end_request() replaced with direct calls to
  __blk_end_request_all().

* s390/char/tape_block: tapeblock_end_request() replaced with direct
  calls to blk_end_request_all().

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Mike Miller <mike.miller@hp.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
15 files changed:
arch/arm/plat-omap/mailbox.c
block/blk-barrier.c
block/blk-core.c
block/elevator.c
drivers/block/cpqarray.c
drivers/block/sx8.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/cdrom/gdrom.c
drivers/cdrom/viocd.c
drivers/memstick/core/mspro_block.c
drivers/s390/block/dasd.c
drivers/s390/char/tape_block.c
drivers/scsi/scsi_lib.c
include/linux/blkdev.h

index 0abfbaa59871313c5ce3857457c3407f6ca4bafc..cf81bad8aec21301c2e2822c77b1691f97315718 100644 (file)
@@ -192,8 +192,7 @@ static void mbox_tx_work(struct work_struct *work)
                }
 
                spin_lock(q->queue_lock);
-               if (__blk_end_request(rq, 0, 0))
-                       BUG();
+               __blk_end_request_all(rq, 0);
                spin_unlock(q->queue_lock);
        }
 }
@@ -224,10 +223,7 @@ static void mbox_rx_work(struct work_struct *work)
                        break;
 
                msg = (mbox_msg_t) rq->data;
-
-               if (blk_end_request(rq, 0, 0))
-                       BUG();
-
+               blk_end_request_all(rq, 0);
                mbox->rxq->callback((void *)msg);
        }
 }
@@ -337,8 +333,7 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
 
                *p = (mbox_msg_t) rq->data;
 
-               if (blk_end_request(rq, 0, 0))
-                       BUG();
+               blk_end_request_all(rq, 0);
 
                if (unlikely(mbox_seq_test(mbox, *p))) {
                        pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
index 20b4111fa0507a46a641faa295124afeaf5f4ea5..c8d087655eff8f94aaa0f65047bc9eea99777e25 100644 (file)
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;
-
-       if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
-               BUG();
-
+       __blk_end_request_all(rq, q->orderr);
        return true;
 }
 
@@ -252,9 +249,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
                         * with prejudice.
                         */
                        elv_dequeue_request(q, rq);
-                       if (__blk_end_request(rq, -EOPNOTSUPP,
-                                             blk_rq_bytes(rq)))
-                               BUG();
+                       __blk_end_request_all(rq, -EOPNOTSUPP);
                        *rqp = NULL;
                        return false;
                }
index b84250d3019b717607531f01c0b2d1695ae31f45..0520cc70458586db66c8cd280431bb267bcc2618 100644 (file)
@@ -1780,7 +1780,7 @@ struct request *elv_next_request(struct request_queue *q)
                        break;
                } else if (ret == BLKPREP_KILL) {
                        rq->cmd_flags |= REQ_QUIET;
-                       __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+                       __blk_end_request_all(rq, -EIO);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
index b03b8752e18b97f65339c9fdf1d115ec7abbcf01..1af5d9f04affe47f187d59df37a287f3f5ad8071 100644 (file)
@@ -810,7 +810,7 @@ void elv_abort_queue(struct request_queue *q)
                rq = list_entry_rq(q->queue_head.next);
                rq->cmd_flags |= REQ_QUIET;
                trace_block_rq_abort(q, rq);
-               __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+               __blk_end_request_all(rq, -EIO);
        }
 }
 EXPORT_SYMBOL(elv_abort_queue);
index ca268ca111598588ef52d9f7ca7e0fa87cfbde86..488a8f4a60aa2d1f74830d7f18f251f3d63b7882 100644 (file)
@@ -1024,8 +1024,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
                                cmd->req.sg[i].size, ddir);
 
        DBGPX(printk("Done with %p\n", rq););
-       if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
-               BUG();
+       __blk_end_request_all(rq, error);
 }
 
 /*
index ff0448e4bf036d36fc6e2a2a071be0ff13b19f99..60e85bb6f7902f507cd960ee8f8b976a4ad8fa7e 100644 (file)
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
        struct request *req = crq->rq;
        int rc;
 
-       rc = __blk_end_request(req, error, blk_rq_bytes(req));
-       assert(rc == 0);
+       __blk_end_request_all(req, error);
 
        rc = carm_put_request(host, crq);
        assert(rc == 0);
index 5d34764c8a8726d5103e6e723eaae6ab28a6a422..50745e64414e5be9a1acdb784516d034b11b618b 100644 (file)
@@ -62,7 +62,7 @@ static void blk_done(struct virtqueue *vq)
                        break;
                }
 
-               __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
+               __blk_end_request_all(vbr->req, error);
                list_del(&vbr->list);
                mempool_free(vbr, vblk->pool);
        }
index 8f905089b72b7e49c2434d1b6abba9fa76321051..cd6cfe3b51e1b2cec1a70867d0bff29f8a83259c 100644 (file)
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
        for (i = info->ring.rsp_cons; i != rp; i++) {
                unsigned long id;
-               int ret;
 
                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);
 
-                       ret = __blk_end_request(req, error, blk_rq_bytes(req));
-                       BUG_ON(ret);
+                       __blk_end_request_all(req, error);
                        break;
                default:
                        BUG();
index 2eecb779437b8057cdef2c59d37bbae71930b9c1..fee9a9e83fc944c28b3fbe300e38ef9c406d6ca9 100644 (file)
@@ -632,7 +632,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
                * before handling ending the request */
                spin_lock(&gdrom_lock);
                list_del_init(&req->queuelist);
-               __blk_end_request(req, err, blk_rq_bytes(req));
+               __blk_end_request_all(req, err);
        }
        spin_unlock(&gdrom_lock);
        kfree(read_command);
index 13929356135c837743f130c2f7de4d3cd85cc4fb..cc3efa096e1a0e9db191d890ab9ed595f442a23d 100644 (file)
@@ -291,23 +291,6 @@ static int send_request(struct request *req)
        return 0;
 }
 
-static void viocd_end_request(struct request *req, int error)
-{
-       int nsectors = req->hard_nr_sectors;
-
-       /*
-        * Make sure it's fully ended, and ensure that we process
-        * at least one sector.
-        */
-       if (blk_pc_request(req))
-               nsectors = (req->data_len + 511) >> 9;
-       if (!nsectors)
-               nsectors = 1;
-
-       if (__blk_end_request(req, error, nsectors << 9))
-               BUG();
-}
-
 static int rwreq;
 
 static void do_viocd_request(struct request_queue *q)
@@ -316,11 +299,11 @@ static void do_viocd_request(struct request_queue *q)
 
        while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
                if (!blk_fs_request(req))
-                       viocd_end_request(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                else if (send_request(req) < 0) {
                        printk(VIOCD_KERN_WARNING
                                        "unable to send message to OS/400!");
-                       viocd_end_request(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                } else
                        rwreq++;
        }
@@ -531,9 +514,9 @@ return_complete:
                                        "with rc %d:0x%04X: %s\n",
                                        req, event->xRc,
                                        bevent->sub_result, err->msg);
-                       viocd_end_request(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                } else
-                       viocd_end_request(req, 0);
+                       __blk_end_request_all(req, 0);
 
                /* restart handling of incoming requests */
                spin_unlock_irqrestore(&viocd_reqlock, flags);
index de143deb06f0bcbb1dc1da9e738e88cffd431491..a41634699f846e95f1dbd8f8f2d185b215c8679a 100644 (file)
@@ -826,7 +826,7 @@ static void mspro_block_submit_req(struct request_queue *q)
 
        if (msb->eject) {
                while ((req = elv_next_request(q)) != NULL)
-                       __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
+                       __blk_end_request_all(req, -ENODEV);
 
                return;
        }
index d1815272c4351b90bbd972a649c8f11a3bd49710..fabec95686b0f48c651ac4cb24c43cea4e402f55 100644 (file)
@@ -1613,15 +1613,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
        del_timer(&block->timer);
 }
 
-/*
- * posts the buffer_cache about a finalized request
- */
-static inline void dasd_end_request(struct request *req, int error)
-{
-       if (__blk_end_request(req, error, blk_rq_bytes(req)))
-               BUG();
-}
-
 /*
  * Process finished error recovery ccw.
  */
@@ -1676,7 +1667,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "Rejecting write request %p",
                                      req);
                        blkdev_dequeue_request(req);
-                       dasd_end_request(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                        continue;
                }
                cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1705,7 +1696,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "on request %p",
                                      PTR_ERR(cqr), req);
                        blkdev_dequeue_request(req);
-                       dasd_end_request(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                        continue;
                }
                /*
@@ -1731,7 +1722,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status <= 0)
                error = status ? status : -EIO;
-       dasd_end_request(req, error);
+       __blk_end_request_all(req, error);
 }
 
 /*
@@ -2040,7 +2031,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
        spin_lock_irq(&block->request_queue_lock);
        while ((req = elv_next_request(block->request_queue))) {
                blkdev_dequeue_request(req);
-               dasd_end_request(req, -EIO);
+               __blk_end_request_all(req, -EIO);
        }
        spin_unlock_irq(&block->request_queue_lock);
 }
index f32e89e7c4f2e9f3f1aa54856996c446921e8a9a..86596d3813b5c05fe6572582e5264ec66a4b5417 100644 (file)
@@ -73,13 +73,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
 /*
  * Post finished request.
  */
-static void
-tapeblock_end_request(struct request *req, int error)
-{
-       if (blk_end_request(req, error, blk_rq_bytes(req)))
-               BUG();
-}
-
 static void
 __tapeblock_end_request(struct tape_request *ccw_req, void *data)
 {
@@ -90,7 +83,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
 
        device = ccw_req->device;
        req = (struct request *) data;
-       tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
+       blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
        if (ccw_req->rc == 0)
                /* Update position. */
                device->blk_data.block_position =
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
        ccw_req = device->discipline->bread(device, req);
        if (IS_ERR(ccw_req)) {
                DBF_EVENT(1, "TBLOCK: bread failed\n");
-               tapeblock_end_request(req, -EIO);
+               blk_end_request_all(req, -EIO);
                return PTR_ERR(ccw_req);
        }
        ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
                 * Start/enqueueing failed. No retries in
                 * this case.
                 */
-               tapeblock_end_request(req, -EIO);
+               blk_end_request_all(req, -EIO);
                device->discipline->free_bread(ccw_req);
        }
 
@@ -177,7 +170,7 @@ tapeblock_requeue(struct work_struct *work) {
                        DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
                        blkdev_dequeue_request(req);
                        spin_unlock_irq(&device->blk_data.request_queue_lock);
-                       tapeblock_end_request(req, -EIO);
+                       blk_end_request_all(req, -EIO);
                        spin_lock_irq(&device->blk_data.request_queue_lock);
                        continue;
                }
index d1cb64ad1a3f373092d954804fa41cca636ccfcd..756ac7c93de0dc29a5c7b57a3e2193b7585658cb 100644 (file)
@@ -922,7 +922,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                }
-               blk_end_request(req, -EIO, blk_rq_bytes(req));
+               blk_end_request_all(req, -EIO);
                scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
index 501f6845cc73708dc735a6069272fa6f6c5c20fa..e33c8356b3da2f174391abac039007821e2cdab6 100644 (file)
@@ -882,6 +882,22 @@ static inline bool blk_end_request(struct request *rq, int error,
        return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 
+/**
+ * blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Completely finish @rq.
+ */
+static inline void blk_end_request_all(struct request *rq, int error)
+{
+       bool pending;
+
+       pending = blk_end_request(rq, error, blk_rq_bytes(rq));
+       BUG_ON(pending);
+}
+
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
@@ -901,6 +917,22 @@ static inline bool __blk_end_request(struct request *rq, int error,
        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 
+/**
+ * __blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Completely finish @rq.  Must be called with queue lock held.
+ */
+static inline void __blk_end_request_all(struct request *rq, int error)
+{
+       bool pending;
+
+       pending = __blk_end_request(rq, error, blk_rq_bytes(rq));
+       BUG_ON(pending);
+}
+
 /**
  * end_request - end I/O on the current segment of the request
  * @rq:                the request being processed