Introduce rq_for_each_segment replacing rq_for_each_bio
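This converts the block layer's own iterators ahead of the driver conversions: every loop that walked a request with rq_for_each_bio() and nested bio_for_each_segment() inside it becomes a single rq_for_each_segment() loop. The iterator itself is added to include/linux/blkdev.h by this commit (that hunk is not shown on this page); reconstructed from its users below, it is essentially:

	/* Sketch of the blkdev.h side, reconstructed from the callers in
	 * this file -- see the full commit for the authoritative version. */
	struct req_iterator {
		int i;			/* bio_vec index inside the current bio */
		struct bio *bio;	/* bio currently being walked */
	};

	#define __rq_for_each_bio(_bio, rq)	\
		if ((rq->bio))			\
			for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

	#define rq_for_each_segment(bvl, _rq, _iter)		\
		__rq_for_each_bio(_iter.bio, _rq)		\
			bio_for_each_segment(bvl, _iter.bio, _iter.i)

The payoff is that callers no longer care about bio boundaries unless they ask for them, which is what lets the new blk_recalc_rq_segments() below replace two layers of segment counting with one.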
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a15845c164f298c97854e21e1c7d79019af646a6..094c0fa5c4051308a1efa6f8376257c5dae4bf9f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -42,6 +42,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+static void blk_recalc_rq_segments(struct request *rq);
 
 /*
  * For the allocated request tables
@@ -1075,12 +1076,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
                 */
                return;
 
-       if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
-               printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-                      __FUNCTION__, tag);
-               return;
-       }
-
        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;
@@ -1090,6 +1085,19 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
                       __FUNCTION__, tag);
 
        bqt->tag_index[tag] = NULL;
+
+       /*
+        * We use test_and_clear_bit's memory ordering properties here.
+        * The tag_map bit acts as a lock for tag_index[bit], so we need
+        * a barrier before clearing the bit (precisely: release semantics).
+        * Could use clear_bit_unlock when it is merged.
+        */
+       if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+               printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+                      __FUNCTION__, tag);
+               return;
+       }
+
        bqt->busy--;
 }
 
@@ -1136,6 +1144,10 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
                        return 1;
 
        } while (test_and_set_bit(tag, bqt->tag_map));
+       /*
+        * We rely on test_and_set_bit providing lock memory ordering semantics
+        * (could use test_and_set_bit_lock when it is merged).
+        */
 
        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
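The two tagging hunks above are an ordering fix: blk_queue_end_tag() used to clear the tag_map bit before tag_index[tag] was set to NULL, so a racing blk_queue_start_tag() could win test_and_set_bit(), look up tag_index[tag], and see the stale request. With the clear moved after the NULL store, the bit behaves as a lock protecting its tag_index slot. Reduced to the bare pattern (a sketch, relying only on the documented barrier semantics of the Linux atomic bitops):

	/* Acquire: the barrier implied by test_and_set_bit() keeps the
	 * slot access from being reordered before the bit is owned. */
	while (test_and_set_bit(tag, bqt->tag_map))
		;				/* slot busy, pick another tag */
	bqt->tag_index[tag] = rq;		/* publish under the bit */

	/* Release: the NULL store must be visible before the bit frees. */
	bqt->tag_index[tag] = NULL;
	if (unlikely(!test_and_clear_bit(tag, bqt->tag_map)))
		printk(KERN_ERR "tag %d was not busy\n", tag);

As the new comments note, test_and_set_bit_lock()/clear_bit_unlock() would express this more directly once they are merged.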
@@ -1209,16 +1221,40 @@ EXPORT_SYMBOL(blk_dump_rq_flags);
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
+       struct request rq;
+       struct bio *nxt = bio->bi_next;
+       rq.q = q;
+       rq.bio = rq.biotail = bio;
+       bio->bi_next = NULL;
+       blk_recalc_rq_segments(&rq);
+       bio->bi_next = nxt;
+       bio->bi_phys_segments = rq.nr_phys_segments;
+       bio->bi_hw_segments = rq.nr_hw_segments;
+       bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
+static void blk_recalc_rq_segments(struct request *rq)
+{
+       int nr_phys_segs;
+       int nr_hw_segs;
+       unsigned int phys_size;
+       unsigned int hw_size;
        struct bio_vec *bv, *bvprv = NULL;
-       int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
+       int seg_size;
+       int hw_seg_size;
+       int cluster;
+       struct req_iterator iter;
        int high, highprv = 1;
+       struct request_queue *q = rq->q;
 
-       if (unlikely(!bio->bi_io_vec))
+       if (!rq->bio)
                return;
 
        cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-       hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
-       bio_for_each_segment(bv, bio, i) {
+       hw_seg_size = seg_size = 0;
+       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+       rq_for_each_segment(bv, rq, iter) {
                /*
                 * the trick here is making sure that a high page is never
                 * considered part of another segment, since that might
@@ -1244,12 +1280,13 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
                }
 new_segment:
                if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
                        hw_seg_size += bv->bv_len;
-               } else {
+               else {
 new_hw_segment:
-                       if (hw_seg_size > bio->bi_hw_front_size)
-                               bio->bi_hw_front_size = hw_seg_size;
+                       if (nr_hw_segs == 1 &&
+                           hw_seg_size > rq->bio->bi_hw_front_size)
+                               rq->bio->bi_hw_front_size = hw_seg_size;
                        hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
                        nr_hw_segs++;
                }
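Note the behavioural subtlety in new_hw_segment: the old per-bio code updated bi_hw_front_size whenever a hw segment closed, because within a single bio any closing segment might be the front one. When walking a whole request, only the first hw segment (nr_hw_segs == 1) is a front segment, and its size belongs to the request's first bio, rq->bio, not to whichever bio the iterator currently sits in. The matching bi_hw_back_size store in the next hunk goes to rq->biotail for the same reason.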
@@ -1259,15 +1296,15 @@ new_hw_segment:
                seg_size = bv->bv_len;
                highprv = high;
        }
-       if (hw_seg_size > bio->bi_hw_back_size)
-               bio->bi_hw_back_size = hw_seg_size;
-       if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
-               bio->bi_hw_front_size = hw_seg_size;
-       bio->bi_phys_segments = nr_phys_segs;
-       bio->bi_hw_segments = nr_hw_segs;
-       bio->bi_flags |= (1 << BIO_SEG_VALID);
+
+       if (nr_hw_segs == 1 &&
+           hw_seg_size > rq->bio->bi_hw_front_size)
+               rq->bio->bi_hw_front_size = hw_seg_size;
+       if (hw_seg_size > rq->biotail->bi_hw_back_size)
+               rq->biotail->bi_hw_back_size = hw_seg_size;
+       rq->nr_phys_segments = nr_phys_segs;
+       rq->nr_hw_segments = nr_hw_segs;
 }
-EXPORT_SYMBOL(blk_recount_segments);
 
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
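blk_recount_segments() survives for callers that hold a bare bio, but it is now a thin wrapper: it builds a throwaway request on the stack around the single bio and defers to blk_recalc_rq_segments(), so one copy of the counting logic serves both cases. Clearing and then restoring bi_next is what limits the walk to this one bio when it is part of a longer chain. The lazy-revalidation callers are unchanged and still look roughly like this (paraphrasing the existing bio_phys_segments() helper in fs/bio.c):

	int bio_phys_segments(struct request_queue *q, struct bio *bio)
	{
		if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
			blk_recount_segments(q, bio);	/* recount lazily */
		return bio->bi_phys_segments;
	}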
@@ -1314,8 +1351,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sg)
 {
        struct bio_vec *bvec, *bvprv;
-       struct bio *bio;
-       int nsegs, i, cluster;
+       struct req_iterator iter;
+       int nsegs, cluster;
 
        nsegs = 0;
        cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
@@ -1324,11 +1361,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         * for each bio in rq
         */
        bvprv = NULL;
-       rq_for_each_bio(bio, rq) {
-               /*
-                * for each segment in bio
-                */
-               bio_for_each_segment(bvec, bio, i) {
+       rq_for_each_segment(bvec, rq, iter) {
                        int nbytes = bvec->bv_len;
 
                        if (bvprv && cluster) {
@@ -1351,8 +1384,7 @@ new_segment:
                                nsegs++;
                        }
                        bvprv = bvec;
-               } /* segments in bio */
-       } /* bios in rq */
+       } /* segments in rq */
 
        return nsegs;
 }
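This blk_rq_map_sg() conversion is the template for the driver patches later in the series: delete the bio local, declare a req_iterator, and fold the two loops into one. (The stale "for each bio in rq" comment above survives as diff context; the loop now visits segments.) A minimal PIO-style driver loop converts along these lines (hypothetical driver code, not part of this patch):

	struct req_iterator iter;
	struct bio_vec *bvec;
	char *buf;

	rq_for_each_segment(bvec, rq, iter) {
		/* one physically contiguous chunk per iteration;
		 * page_address() assumes a lowmem page for brevity */
		buf = page_address(bvec->bv_page) + bvec->bv_offset;
		/* ... transfer bvec->bv_len bytes at buf ... */
	}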
@@ -3318,48 +3350,6 @@ void submit_bio(int rw, struct bio *bio)
 
 EXPORT_SYMBOL(submit_bio);
 
-static void blk_recalc_rq_segments(struct request *rq)
-{
-       struct bio *bio, *prevbio = NULL;
-       int nr_phys_segs, nr_hw_segs;
-       unsigned int phys_size, hw_size;
-       struct request_queue *q = rq->q;
-
-       if (!rq->bio)
-               return;
-
-       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-       rq_for_each_bio(bio, rq) {
-               /* Force bio hw/phys segs to be recalculated. */
-               bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-               nr_phys_segs += bio_phys_segments(q, bio);
-               nr_hw_segs += bio_hw_segments(q, bio);
-               if (prevbio) {
-                       int pseg = phys_size + prevbio->bi_size + bio->bi_size;
-                       int hseg = hw_size + prevbio->bi_size + bio->bi_size;
-
-                       if (blk_phys_contig_segment(q, prevbio, bio) &&
-                           pseg <= q->max_segment_size) {
-                               nr_phys_segs--;
-                               phys_size += prevbio->bi_size + bio->bi_size;
-                       } else
-                               phys_size = 0;
-
-                       if (blk_hw_contig_segment(q, prevbio, bio) &&
-                           hseg <= q->max_segment_size) {
-                               nr_hw_segs--;
-                               hw_size += prevbio->bi_size + bio->bi_size;
-                       } else
-                               hw_size = 0;
-               }
-               prevbio = bio;
-       }
-
-       rq->nr_phys_segments = nr_phys_segs;
-       rq->nr_hw_segments = nr_hw_segs;
-}
-
 static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
        if (blk_fs_request(rq)) {
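The deleted blk_recalc_rq_segments() above shows why the rewrite pays off. It summed per-bio totals from bio_phys_segments()/bio_hw_segments() and then patched up the result wherever two adjacent bios turned out to be mergeable, with phys_size/hw_size accumulating the size of a segment spanning bios. The replacement near the top of this file gets the same answer in one pass: rq_for_each_segment() hands it every bio_vec in request order, so the ordinary bvprv-vs-bv mergeability checks handle bio boundaries for free and the fix-up pass disappears. (phys_size and hw_size are still declared and zeroed in the new function but appear otherwise unused there; they read as leftovers of this refactor that a follow-up could remove.)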