block: convert merge/insert code to check for REQ_OPs.
author Mike Christie <mchristi@redhat.com>
Sun, 5 Jun 2016 19:32:15 +0000 (14:32 -0500)
committer Jens Axboe <axboe@fb.com>
Tue, 7 Jun 2016 19:41:38 +0000 (13:41 -0600)
This patch converts the block layer merging code to use separate variables
for the operation and flags, and to check req_op for the REQ_OP.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-core.c
block/blk-merge.c
include/linux/blkdev.h

index 090e55d7cad7058572314389d23b8a8553a67939..1333bb764b288fab3463b781a65049b66cc2413a 100644 (file)
@@ -2161,7 +2161,7 @@ EXPORT_SYMBOL(submit_bio);
 static int blk_cloned_rq_check_limits(struct request_queue *q,
                                      struct request *rq)
 {
-       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
+       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }
index 5a03f967557a95240e11aa48e16e8f54d29f64b4..c265348b75d1ac14374407e408a67da80c9fcb49 100644 (file)
@@ -649,7 +649,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
-       if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
+       if (!blk_check_merge_flags(req->cmd_flags, req_op(req), next->cmd_flags,
+                                  req_op(next)))
                return 0;
 
        /*
@@ -663,7 +664,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || req_no_special_merge(next))
                return 0;
 
-       if (req->cmd_flags & REQ_WRITE_SAME &&
+       if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;
 
@@ -751,7 +752,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;
 
-       if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
+       if (!blk_check_merge_flags(rq->cmd_flags, req_op(rq), bio->bi_rw,
+                                  bio_op(bio)))
                return false;
 
        /* different data direction or already started, don't merge */
@@ -767,7 +769,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
                return false;
 
        /* must be using the same buffer */
-       if (rq->cmd_flags & REQ_WRITE_SAME &&
+       if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;
 
index 8c78aca080af56cb9c57e0f186b3ec396011756c..25f01ff19780e81aed11807ad7b579b0f1496e4a 100644 (file)
@@ -666,16 +666,16 @@ static inline bool rq_mergeable(struct request *rq)
        return true;
 }
 
-static inline bool blk_check_merge_flags(unsigned int flags1,
-                                        unsigned int flags2)
+static inline bool blk_check_merge_flags(unsigned int flags1, unsigned int op1,
+                                        unsigned int flags2, unsigned int op2)
 {
-       if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+       if ((op1 == REQ_OP_DISCARD) != (op2 == REQ_OP_DISCARD))
                return false;
 
        if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
                return false;
 
-       if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+       if ((op1 == REQ_OP_WRITE_SAME) != (op2 == REQ_OP_WRITE_SAME))
                return false;
 
        return true;
@@ -887,12 +887,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 }
 
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-                                                    unsigned int cmd_flags)
+                                                    int op)
 {
-       if (unlikely(cmd_flags & REQ_DISCARD))
+       if (unlikely(op == REQ_OP_DISCARD))
                return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
-       if (unlikely(cmd_flags & REQ_WRITE_SAME))
+       if (unlikely(op == REQ_OP_WRITE_SAME))
                return q->limits.max_write_same_sectors;
 
        return q->limits.max_sectors;
@@ -919,11 +919,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
        if (unlikely(rq->cmd_type != REQ_TYPE_FS))
                return q->limits.max_hw_sectors;
 
-       if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
-               return blk_queue_get_max_sectors(q, rq->cmd_flags);
+       if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+               return blk_queue_get_max_sectors(q, req_op(rq));
 
        return min(blk_max_size_offset(q, blk_rq_pos(rq)),
-                       blk_queue_get_max_sectors(q, rq->cmd_flags));
+                       blk_queue_get_max_sectors(q, req_op(rq)));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)