block: rename bio bi_rw to bi_rwf  [branch: for-4.8/bi_rwf]
author    Jens Axboe <axboe@fb.com>
          Mon, 1 Aug 2016 16:22:35 +0000 (10:22 -0600)
committer Jens Axboe <axboe@fb.com>
          Mon, 1 Aug 2016 16:22:35 +0000 (10:22 -0600)
Since commit 63a4cc24867d, bio->bi_rw carries the request flags in
the lower bits and the op code in the upper bits. This means that
old code that manually sets bi_rw is most likely broken. Instead of
letting that brokenness linger, rename the member so that old and
out-of-tree code breaks at compile time instead of misbehaving at
runtime.

No intended functional changes in this commit.

Signed-off-by: Jens Axboe <axboe@fb.com>
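
For old and out-of-tree code, the rename forces a move to the
accessors that the split layout introduced. A minimal sketch of the
conversion (editor's illustration, not part of the patch; REQ_SYNC
and REQ_FUA are picked only as example flags):

    /* old, no longer compiles: the bi_rw member is gone */
    /*     bio->bi_rw = WRITE | REQ_SYNC;                 */

    /* new: the op code lives in the top REQ_OP_BITS of bi_rwf,
     * the REQ_* flags stay in the low bits */
    bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);

    if (bio_op(bio) == REQ_OP_WRITE)    /* read the op back */
            bio->bi_rwf |= REQ_FUA;     /* flags may still be OR'd in */
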
50 files changed:
Documentation/block/biodoc.txt
Documentation/device-mapper/dm-flakey.txt
block/bio-integrity.c
block/bio.c
block/blk-core.c
block/blk-merge.c
block/blk-mq.c
block/blk-throttle.c
block/cfq-iosched.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/pktcdvd.c
drivers/block/umem.c
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/bcache/writeback.h
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-era-target.c
drivers/md/dm-flakey.c
drivers/md/dm-io.c
drivers/md/dm-log-writes.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-thin.c
drivers/md/dm-zero.c
drivers/md/dm.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
fs/btrfs/check-integrity.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/volumes.c
include/linux/bio.h
include/linux/blk-cgroup.h
include/linux/blk_types.h
include/trace/events/bcache.h
include/trace/events/block.h
kernel/trace/blktrace.c

diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 5be8a7f4cc7f0e0e181143ed0af847a489045de3..dec33df3453ed00129f60680c8a891c6031fe640 100644
@@ -269,7 +269,7 @@ Arjan's proposed request priority scheme allows higher levels some broad
   requests which haven't aged too much on the queue. Potentially this priority
   could even be exposed to applications in some manner, providing higher level
   tunability. Time based aging avoids starvation of lower priority
-  requests. Some bits in the bi_rw flags field in the bio structure are
+  requests. Some bits in the bi_rwf flags field in the bio structure are
   intended to be used for this priority information.
 
 
@@ -432,7 +432,7 @@ struct bio {
        struct bio          *bi_next;    /* request queue link */
        struct block_device *bi_bdev;   /* target device */
        unsigned long       bi_flags;    /* status, command, etc */
-       unsigned long       bi_rw;       /* low bits: r/w, high: priority */
+       unsigned long       bi_rwf;       /* low bits: r/w, high: priority */
 
        unsigned int    bi_vcnt;     /* how may bio_vec's */
        struct bvec_iter        bi_iter;        /* current index into bio_vec array */
diff --git a/Documentation/device-mapper/dm-flakey.txt b/Documentation/device-mapper/dm-flakey.txt
index 6ff5c2327227f2040e43ec2b74f97a2bd74ca11f..c58ab5411a216f614e68e660f0a35a8059c8e168 100644
@@ -42,7 +42,7 @@ Optional feature parameters:
     <direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
                 'w' is incompatible with drop_writes.
     <value>: The value (from 0-255) to write.
-    <flags>: Perform the replacement only if bio->bi_rw has all the
+    <flags>: Perform the replacement only if bio->bi_rwf has all the
             selected flags set.
 
 Examples:
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index f70cc3bdfd012949d9b89bc62645b13e98210ed1..09cc72cd0c2cbe96745b6e36245f70ccd154700a 100644
@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 
        bip->bip_bio = bio;
        bio->bi_integrity = bip;
-       bio->bi_rw |= REQ_INTEGRITY;
+       bio->bi_rwf |= REQ_INTEGRITY;
 
        return bip;
 err:
diff --git a/block/bio.c b/block/bio.c
index 3f76a38a5e2d66f81fe78e4d3feffe825e3214eb..beef39fc2f5ca5460d15171d7629235df0ce5a1a 100644
@@ -580,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
         */
        bio->bi_bdev = bio_src->bi_bdev;
        bio_set_flag(bio, BIO_CLONED);
-       bio->bi_rw = bio_src->bi_rw;
+       bio->bi_rwf = bio_src->bi_rwf;
        bio->bi_iter = bio_src->bi_iter;
        bio->bi_io_vec = bio_src->bi_io_vec;
 
@@ -663,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
        if (!bio)
                return NULL;
        bio->bi_bdev            = bio_src->bi_bdev;
-       bio->bi_rw              = bio_src->bi_rw;
+       bio->bi_rwf             = bio_src->bi_rwf;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
 
@@ -873,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
        init_completion(&ret.event);
        bio->bi_private = &ret;
        bio->bi_end_io = submit_bio_wait_endio;
-       bio->bi_rw |= REQ_SYNC;
+       bio->bi_rwf |= REQ_SYNC;
        submit_bio(bio);
        wait_for_completion_io(&ret.event);
 
diff --git a/block/blk-core.c b/block/blk-core.c
index a687e9cc16c29fd765d133acc9199d92f5ce2377..5630648c752c918c3e8d876d284f922adbb46c06 100644
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
         * Flush requests do not use the elevator so skip initialization.
         * This allows a request to share the flush and elevator data.
         */
-       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+       if (bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA))
                return false;
 
        return true;
@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                            struct bio *bio)
 {
-       const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+       const int ff = bio->bi_rwf & REQ_FAILFAST_MASK;
 
        if (!ll_back_merge_fn(q, req, bio))
                return false;
@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
                             struct bio *bio)
 {
-       const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+       const int ff = bio->bi_rwf & REQ_FAILFAST_MASK;
 
        if (!ll_front_merge_fn(q, req, bio))
                return false;
@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 {
        req->cmd_type = REQ_TYPE_FS;
 
-       req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
-       if (bio->bi_rw & REQ_RAHEAD)
+       req->cmd_flags |= bio->bi_rwf & REQ_COMMON_MASK;
+       if (bio->bi_rwf & REQ_RAHEAD)
                req->cmd_flags |= REQ_FAILFAST_MASK;
 
        req->errors = 0;
@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
-       const bool sync = !!(bio->bi_rw & REQ_SYNC);
+       const bool sync = !!(bio->bi_rwf & REQ_SYNC);
        struct blk_plug *plug;
        int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
        struct request *req;
@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
                return BLK_QC_T_NONE;
        }
 
-       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+       if (bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA)) {
                spin_lock_irq(q->queue_lock);
                where = ELEVATOR_INSERT_FLUSH;
                goto get_rq;
@@ -1728,7 +1728,7 @@ get_rq:
        /*
         * Add in META/PRIO flags, if set, before we get to the IO scheduler
         */
-       rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+       rw_flags |= (bio->bi_rwf & (REQ_META | REQ_PRIO));
 
        /*
         * Grab a free request. This is might sleep but can not fail.
@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
        printk(KERN_INFO "attempt to access beyond end of device\n");
        printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
                        bdevname(bio->bi_bdev, b),
-                       bio->bi_rw,
+                       bio->bi_rwf,
                        (unsigned long long)bio_end_sector(bio),
                        (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 }
@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
         * drivers without flush support don't have to worry
         * about them.
         */
-       if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+       if ((bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA)) &&
            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-               bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+               bio->bi_rwf &= ~(REQ_PREFLUSH | REQ_FUA);
                if (!nr_sectors) {
                        err = 0;
                        goto end_io;
@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
         * one.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
-               if ((bio->bi_rw & ff) != ff)
+               if ((bio->bi_rwf & ff) != ff)
                        break;
                bytes += bio->bi_iter.bi_size;
        }
@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        /* mixed attributes always follow the first bio */
        if (req->cmd_flags & REQ_MIXED_MERGE) {
                req->cmd_flags &= ~REQ_FAILFAST_MASK;
-               req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+               req->cmd_flags |= req->bio->bi_rwf & REQ_FAILFAST_MASK;
        }
 
        /*
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 41cbd48789582d06e9933576782850ef793f3bde..a39f01818ab8d19463f3d445e28ea86994cefdc4 100644
@@ -186,7 +186,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 
        if (split) {
                /* there isn't chance to merge the splitted bio */
-               split->bi_rw |= REQ_NOMERGE;
+               split->bi_rwf |= REQ_NOMERGE;
 
                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
@@ -616,9 +616,9 @@ void blk_rq_set_mixed_merge(struct request *rq)
         * Distributes the attributs to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
-               WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
-                            (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
-               bio->bi_rw |= ff;
+               WARN_ON_ONCE((bio->bi_rwf & REQ_FAILFAST_MASK) &&
+                            (bio->bi_rwf & REQ_FAILFAST_MASK) != ff);
+               bio->bi_rwf |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6a63da101bc4db2126ae768ade7f6719426b2fa3..8ef42426238909d89e1448ca5910a7325237623f 100644
@@ -1234,7 +1234,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-       if (rw_is_sync(bio_op(bio), bio->bi_rw))
+       if (rw_is_sync(bio_op(bio), bio->bi_rwf))
                op_flags |= REQ_SYNC;
 
        trace_block_getrq(q, bio, op);
@@ -1302,8 +1302,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-       const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-       const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+       const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rwf);
+       const int is_flush_fua = bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA);
        struct blk_map_ctx data;
        struct request *rq;
        unsigned int request_count = 0;
@@ -1396,8 +1396,8 @@ done:
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-       const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-       const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+       const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rwf);
+       const int is_flush_fua = bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA);
        struct blk_plug *plug;
        unsigned int request_count = 0;
        struct blk_map_ctx data;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c5494e40323992705ad56167267ea971251b0569..78dfd5857d6d9eedcfe63c392f48d8e0990883e7 100644
@@ -821,8 +821,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
         * second time when it eventually gets issued.  Set it when a bio
         * is being charged to a tg.
         */
-       if (!(bio->bi_rw & REQ_THROTTLED))
-               bio->bi_rw |= REQ_THROTTLED;
+       if (!(bio->bi_rwf & REQ_THROTTLED))
+               bio->bi_rwf |= REQ_THROTTLED;
 }
 
 /**
@@ -1399,7 +1399,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
        WARN_ON_ONCE(!rcu_read_lock_held());
 
        /* see throtl_charge_bio() */
-       if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
+       if ((bio->bi_rwf & REQ_THROTTLED) || !tg->has_rules[rw])
                goto out;
 
        spin_lock_irq(q->queue_lock);
@@ -1478,7 +1478,7 @@ out:
         * being issued.
         */
        if (!throttled)
-               bio->bi_rw &= ~REQ_THROTTLED;
+               bio->bi_rwf &= ~REQ_THROTTLED;
        return throttled;
 }
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index acabba198de936cd9fc4b2e679947fb526c9c0f1..3897725dd9c1936611b7c16bf1e5c8b0225b5bef 100644
@@ -918,7 +918,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-       return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
+       return bio_data_dir(bio) == READ || (bio->bi_rwf & REQ_SYNC);
 }
 
 /*
@@ -2565,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
                                struct bio *bio)
 {
-       cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
+       cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rwf);
 }
 
 static void
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0501ae0c517b089eb411aa1b77ad3c0ee0cd8dfe..30f91717890aa42b009d0601a00c9d00f7f7a949 100644
@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
                             struct bio *bio)
 {
        if (connection->agreed_pro_version >= 95)
-               return  (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-                       (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
-                       (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+               return  (bio->bi_rwf & REQ_SYNC ? DP_RW_SYNC : 0) |
+                       (bio->bi_rwf & REQ_FUA ? DP_FUA : 0) |
+                       (bio->bi_rwf & REQ_PREFLUSH ? DP_FLUSH : 0) |
                        (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
                        (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
        else
-               return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+               return bio->bi_rwf & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write or TRIM aka REQ_DISCARD requests
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index df45713dfbe8694772ea564976a6a6d3449bd2d1..334ae22ca10d3b4340c18376c2d44accaed93981 100644
@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
  * drbd_submit_peer_request()
  * @device:    DRBD device.
  * @peer_req:  peer request
- * @rw:                flag field, see bio->bi_rw
+ * @rw:                flag field, see bio->bi_rwf
  *
  * May spread the pages to multiple bios,
  * depending on bio_add_page restrictions.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 66b8e4bb74d8ab099b090103bd1ee1d4ce86c6f9..6a496597209f73a12ccc6ed2628e1179a70c4df8 100644
@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
         */
        if (!ok &&
            bio_op(req->master_bio) == REQ_OP_READ &&
-           !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+           !(req->master_bio->bi_rwf & REQ_RAHEAD) &&
            !list_empty(&req->tl_requests))
                req->rq_state |= RQ_POSTPONED;
 
@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
         * replicating, in which case there is no point. */
        if (unlikely(req->i.size == 0)) {
                /* The only size==0 bios we expect are empty flushes. */
-               D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
+               D_ASSERT(device, req->master_bio->bi_rwf & REQ_PREFLUSH);
                if (remote)
                        _req_mod(req, QUEUE_AS_DRBD_BARRIER);
                return remote;
@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 
        if (bio_op(bio) != REQ_OP_READ)
                type = DRBD_FAULT_DT_WR;
-       else if (bio->bi_rw & REQ_RAHEAD)
+       else if (bio->bi_rwf & REQ_RAHEAD)
                type = DRBD_FAULT_DT_RA;
        else
                type = DRBD_FAULT_DT_RD;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 35dbb3dca47ef6a609811c03fefcc1bb4fc2f101..e58720f3b3dc2fed324ba27647806b856b7a02ed 100644
@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
                                what = DISCARD_COMPLETED_WITH_ERROR;
                        break;
                case REQ_OP_READ:
-                       if (bio->bi_rw & REQ_RAHEAD)
+                       if (bio->bi_rwf & REQ_RAHEAD)
                                what = READ_AHEAD_COMPLETED_WITH_ERROR;
                        else
                                what = READ_COMPLETED_WITH_ERROR;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9393bc730acf96e83f1ae9412f5304c27c2ec8ae..90fa4ac149dbe04e79c6dc2d954225b6f988079b 100644
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
 
        bio_reset(pkt->bio);
        pkt->bio->bi_bdev = pd->bdev;
-       pkt->bio->bi_rw = REQ_WRITE;
+       bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
        pkt->bio->bi_iter.bi_sector = new_sector;
        pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
        pkt->bio->bi_vcnt = pkt->frames;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index d0a3e6d4515f8246bab82dab698cfcc8b58f800c..f4ec6e1fe4bf35edfb2210b7e9984ce40ff0cdfe 100644
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
-       if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+       if (bio->bi_rwf & REQ_SYNC || !mm_check_plugged(card))
                activate(card);
        spin_unlock_irq(&card->lock);
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 69f16f43f8ab12fda3ddb9a2cfefa1d2abd1bf7b..941e5bd1a7b60ad1a8d929f90ff4d12b99750131 100644
@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
         * Journal writes are marked REQ_PREFLUSH; if the original write was a
         * flush, it'll wait on the journal write.
         */
-       bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
+       bio->bi_rwf &= ~(REQ_PREFLUSH|REQ_FUA);
 
        do {
                unsigned i;
@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
        if (!congested &&
            mode == CACHE_MODE_WRITEBACK &&
            op_is_write(bio_op(bio)) &&
-           (bio->bi_rw & REQ_SYNC))
+           (bio->bi_rwf & REQ_SYNC))
                goto rescale;
 
        spin_lock(&dc->io_lock);
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
        s->iop.write_prio       = 0;
        s->iop.error            = 0;
        s->iop.flags            = 0;
-       s->iop.flush_journal    = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
+       s->iop.flush_journal    = (bio->bi_rwf & (REQ_PREFLUSH|REQ_FUA)) != 0;
        s->iop.wq               = bcache_wq;
 
        return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                goto out_submit;
        }
 
-       if (!(bio->bi_rw & REQ_RAHEAD) &&
-           !(bio->bi_rw & REQ_META) &&
+       if (!(bio->bi_rwf & REQ_RAHEAD) &&
+           !(bio->bi_rwf & REQ_META) &&
            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
                reada = min_t(sector_t, dc->readahead >> 9,
                              bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                bch_writeback_add(dc);
                s->iop.bio = bio;
 
-               if (bio->bi_rw & REQ_PREFLUSH) {
+               if (bio->bi_rwf & REQ_PREFLUSH) {
                        /* Also need to send a flush to the backing device */
                        struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                             dc->disk.bio_split);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 88ef6d14cce399b36a881807335a4b7b9fe4a66e..1e8a269a1fd57cbc81cff47dd03c1b7623788169 100644
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);
 
-               bio->bi_rw      = REQ_SYNC|REQ_META|op_flags;
+               bio->bi_rwf = REQ_SYNC | REQ_META | op_flags;
                bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
                bio->bi_end_io  = uuid_endio;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042aed243b2660f6b70a380d0647c709aa65..7fec96e8ab0a8e8ce69da12aa13891c4ed8dac5a 100644
@@ -57,7 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
        if (would_skip)
                return false;
 
-       return bio->bi_rw & REQ_SYNC ||
+       return bio->bi_rwf & REQ_SYNC ||
                in_use <= CUTOFF_WRITEBACK;
 }
 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 718744db62df7f37ddbdaaebecd87bd9b7707002..afd7322e15fa04127310f18d9c591ddb5c3a401c 100644
@@ -788,7 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 
        spin_lock_irqsave(&cache->lock, flags);
        if (cache->need_tick_bio &&
-           !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
+           !(bio->bi_rwf & (REQ_FUA | REQ_PREFLUSH)) &&
            bio_op(bio) != REQ_OP_DISCARD) {
                pb->tick = true;
                cache->need_tick_bio = false;
@@ -830,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 {
-       return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+       return bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA);
 }
 
 /*
@@ -1069,7 +1069,7 @@ static void dec_io_migrations(struct cache *cache)
 static bool discard_or_flush(struct bio *bio)
 {
        return bio_op(bio) == REQ_OP_DISCARD ||
-              bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+              bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1980,7 +1980,7 @@ static void process_deferred_bios(struct cache *cache)
 
                bio = bio_list_pop(&bios);
 
-               if (bio->bi_rw & REQ_PREFLUSH)
+               if (bio->bi_rwf & REQ_PREFLUSH)
                        process_flush_bio(cache, bio);
                else if (bio_op(bio) == REQ_OP_DISCARD)
                        process_discard_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8f2e3e2ffd26aaac40ccec04f9257d65ed2d962d..8bb7ec30d8390d89b5e0152f0e7f0dede56173ce 100644
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
        clone->bi_private = io;
        clone->bi_end_io  = crypt_endio;
        clone->bi_bdev    = cc->dev->bdev;
-       bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
+       bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rwf);
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1915,7 +1915,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
         * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
         * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
         */
-       if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+       if (unlikely(bio->bi_rwf & REQ_PREFLUSH ||
            bio_op(bio) == REQ_OP_DISCARD)) {
                bio->bi_bdev = cc->dev->bdev;
                if (bio_sectors(bio))
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2faf49d8f4d768467dbf2c376bce6d784ea3fed8..cc6fe90a41bdc50c9d3465fdd2633cc62a0dd61d 100644
@@ -1542,7 +1542,7 @@ static int era_map(struct dm_target *ti, struct bio *bio)
        /*
         * REQ_PREFLUSH bios carry no data, so we're not interested in them.
         */
-       if (!(bio->bi_rw & REQ_PREFLUSH) &&
+       if (!(bio->bi_rwf & REQ_PREFLUSH) &&
            (bio_data_dir(bio) == WRITE) &&
            !metadata_current_marked(era->md, block)) {
                defer_bio(era, bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 29b99fb6a16a4c29670d947eb32ec40f1f270f98..aeb5d8c99de05e18aeda85a46fddefca5bae6dde 100644
@@ -16,7 +16,7 @@
 #define DM_MSG_PREFIX "flakey"
 
 #define all_corrupt_bio_flags_match(bio, fc)   \
-       (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+       (((bio)->bi_rwf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
 
 /*
  * Flakey: Used for testing only, simulates intermittent,
@@ -266,9 +266,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
                data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
 
                DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
-                       "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
+                       "(rw=%c bi_rwf=%u bi_sector=%llu cur_bytes=%u)\n",
                        bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-                       (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+                       (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rwf,
                        (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
        }
 }
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index daa03e41654ae99f63fe3b8e060a9169a89758b1..7c9141a79f9db15a0122dacd8d089411b99ee53a 100644
@@ -505,9 +505,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
- * If you fail to do one of these, the IO will be submitted to the disk after
- * q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rwf. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct dm_io_region *where, unsigned long *sync_error_bits)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index b5dbf7a0515e0c7cb18ab149ba887dae93bfac39..de864196af58c724d8b726e4b217974c86269390 100644
@@ -555,8 +555,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
        struct bio_vec bv;
        size_t alloc_size;
        int i = 0;
-       bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
-       bool fua_bio = (bio->bi_rw & REQ_FUA);
+       bool flush_bio = (bio->bi_rwf & REQ_PREFLUSH);
+       bool fua_bio = (bio->bi_rwf & REQ_FUA);
        bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
 
        pb->block = NULL;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7eac080fcb18cd67110888d323c0c460068c21e1..9e0979b3e4aa86d8fe245a05758fcb5527face8a 100644
@@ -647,7 +647,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 
        bio->bi_error = 0;
        bio->bi_bdev = pgpath->path.dev->bdev;
-       bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+       bio->bi_rwf |= REQ_FAILFAST_TRANSPORT;
 
        if (pgpath->pg->ps.type->start_io)
                pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dac55b254a09a7fb76d1745ff7f0660338211e2a..f443bb32a4ad2dbf6b69a769f0aa75b0b4e3ac26 100644
@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
+               .bi_op_flags = bio->bi_rwf & WRITE_FLUSH_FUA,
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = write_callback,
@@ -704,7 +704,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
        bio_list_init(&requeue);
 
        while ((bio = bio_list_pop(writes))) {
-               if ((bio->bi_rw & REQ_PREFLUSH) ||
+               if ((bio->bi_rwf & REQ_PREFLUSH) ||
                    (bio_op(bio) == REQ_OP_DISCARD)) {
                        bio_list_add(&sync, bio);
                        continue;
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
         * If region is not in-sync queue the bio.
         */
        if (!r || (r == -EWOULDBLOCK)) {
-               if (bio->bi_rw & REQ_RAHEAD)
+               if (bio->bi_rwf & REQ_RAHEAD)
                        return -EWOULDBLOCK;
 
                queue_bio(ms, bio, rw);
@@ -1253,7 +1253,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE) {
-               if (!(bio->bi_rw & REQ_PREFLUSH) &&
+               if (!(bio->bi_rwf & REQ_PREFLUSH) &&
                    bio_op(bio) != REQ_OP_DISCARD)
                        dm_rh_dec(ms->rh, bio_record->write_region);
                return error;
@@ -1262,7 +1262,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
        if (error == -EOPNOTSUPP)
                goto out;
 
-       if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+       if ((error == -EWOULDBLOCK) && (bio->bi_rwf & REQ_RAHEAD))
                goto out;
 
        if (unlikely(error)) {
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index b11813431f31eb170955ee1a0b14329f403b15f1..05f540b895b5dafb288f532c962bcd239a550e87 100644
@@ -398,7 +398,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
        region_t region = dm_rh_bio_to_region(rh, bio);
        int recovering = 0;
 
-       if (bio->bi_rw & REQ_PREFLUSH) {
+       if (bio->bi_rwf & REQ_PREFLUSH) {
                rh->flush_failure = 1;
                return;
        }
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
        struct bio *bio;
 
        for (bio = bios->head; bio; bio = bio->bi_next) {
-               if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
+               if (bio->bi_rwf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
                        continue;
                rh_inc(rh, dm_rh_bio_to_region(rh, bio));
        }
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 731e1f5bd89574deb711419ebe8eaf7a544e605d..b107a1bad4b196f00683e59634e8138007af8c44 100644
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
        init_tracked_chunk(bio);
 
-       if (bio->bi_rw & REQ_PREFLUSH) {
+       if (bio->bi_rwf & REQ_PREFLUSH) {
                bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }
@@ -1800,7 +1800,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
        init_tracked_chunk(bio);
 
-       if (bio->bi_rw & REQ_PREFLUSH) {
+       if (bio->bi_rwf & REQ_PREFLUSH) {
                if (!dm_bio_get_target_bio_nr(bio))
                        bio->bi_bdev = s->origin->bdev;
                else
@@ -2286,7 +2286,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 
        bio->bi_bdev = o->dev->bdev;
 
-       if (unlikely(bio->bi_rw & REQ_PREFLUSH))
+       if (unlikely(bio->bi_rwf & REQ_PREFLUSH))
                return DM_MAPIO_REMAPPED;
 
        if (bio_data_dir(bio) != WRITE)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 01bb9cf2a8c2318e1b5cf704728637acf95665c9..029e0c241eb2b8613110ea7aaec5192b588957c0 100644
@@ -286,7 +286,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
        uint32_t stripe;
        unsigned target_bio_nr;
 
-       if (bio->bi_rw & REQ_PREFLUSH) {
+       if (bio->bi_rwf & REQ_PREFLUSH) {
                target_bio_nr = dm_bio_get_target_bio_nr(bio);
                BUG_ON(target_bio_nr >= sc->stripes);
                bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
@@ -383,7 +383,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
        if (!error)
                return 0; /* I/O complete */
 
-       if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+       if ((error == -EWOULDBLOCK) && (bio->bi_rwf & REQ_RAHEAD))
                return error;
 
        if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 197ea200340029febe2ff5fe082ae07da16038c3..3013c78bca95574fe784f1be100f3d82cd6fb803 100644
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-       return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+       return (bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA)) &&
                dm_thin_changed_this_transaction(tc->td);
 }
 
@@ -870,7 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
        struct bio *bio;
 
        while ((bio = bio_list_pop(&cell->bios))) {
-               if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+               if (bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA) ||
                    bio_op(bio) == REQ_OP_DISCARD)
                        bio_list_add(&info->defer_bios, bio);
                else {
@@ -1717,7 +1717,7 @@ static void __remap_and_issue_shared_cell(void *context,
 
        while ((bio = bio_list_pop(&cell->bios))) {
                if ((bio_data_dir(bio) == WRITE) ||
-                   (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+                   (bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA) ||
                     bio_op(bio) == REQ_OP_DISCARD))
                        bio_list_add(&info->defer_bios, bio);
                else {
@@ -2635,7 +2635,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_SUBMITTED;
        }
 
-       if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+       if (bio->bi_rwf & (REQ_PREFLUSH | REQ_FUA) ||
            bio_op(bio) == REQ_OP_DISCARD) {
                thin_defer_bio_with_throttle(tc, bio);
                return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 618b8752dcf1479733764592930902232ec47b27..43349420631470b781e0cd6e49e5d0aeb4a62b24 100644
@@ -37,7 +37,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 {
        switch (bio_op(bio)) {
        case REQ_OP_READ:
-               if (bio->bi_rw & REQ_RAHEAD) {
+               if (bio->bi_rwf & REQ_RAHEAD) {
                        /* readahead of null bytes only wastes buffer cache */
                        return -EIO;
                }
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ceb69fc0b10b32773bd41d3c0b8beadf442571f7..d59a57191dd4712c1299255247c3d28e509540d6 100644
@@ -798,12 +798,12 @@ static void dec_pending(struct dm_io *io, int error)
                if (io_error == DM_ENDIO_REQUEUE)
                        return;
 
-               if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
+               if ((bio->bi_rwf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
                        /*
                         * Preflush done for flush with data, reissue
                         * without REQ_PREFLUSH.
                         */
-                       bio->bi_rw &= ~REQ_PREFLUSH;
+                       bio->bi_rwf &= ~REQ_PREFLUSH;
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
@@ -964,7 +964,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 {
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-       BUG_ON(bio->bi_rw & REQ_PREFLUSH);
+       BUG_ON(bio->bi_rwf & REQ_PREFLUSH);
        BUG_ON(bi_size > *tio->len_ptr);
        BUG_ON(n_sectors > bi_size);
        *tio->len_ptr -= bi_size - n_sectors;
@@ -1252,7 +1252,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 
        start_io_acct(ci.io);
 
-       if (bio->bi_rw & REQ_PREFLUSH) {
+       if (bio->bi_rwf & REQ_PREFLUSH) {
                ci.bio = &ci.md->flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
@@ -1290,7 +1290,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
                dm_put_live_table(md, srcu_idx);
 
-               if (!(bio->bi_rw & REQ_RAHEAD))
+               if (!(bio->bi_rwf & REQ_RAHEAD))
                        queue_io(md, bio);
                else
                        bio_io_error(bio);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 70ff888d25d0864e1df9f181430e75ba42e89ccd..8acc726eb1147e60951da31aec42b99e55945a86 100644
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
        struct bio *split;
        sector_t start_sector, end_sector, data_offset;
 
-       if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+       if (unlikely(bio->bi_rwf & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1f123f5a29da2cb4859060fab44d69e32c388606..a93fbeffa3cc66c2f18e165149ab544887139c83 100644
@@ -285,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
         */
        sectors = bio_sectors(bio);
        /* bio could be mergeable after passing to underlayer */
-       bio->bi_rw &= ~REQ_NOMERGE;
+       bio->bi_rwf &= ~REQ_NOMERGE;
        mddev->pers->make_request(mddev, bio);
 
        cpu = part_stat_lock();
@@ -414,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
                /* an empty barrier - all done */
                bio_endio(bio);
        else {
-               bio->bi_rw &= ~REQ_PREFLUSH;
+               bio->bi_rwf &= ~REQ_PREFLUSH;
                mddev->pers->make_request(mddev, bio);
        }
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 72ea98e89e5787fd96b7ebfef2cc68cbf1a02107..d31e5ae789edab2bbf63eb27065cf68ba806c363 100644
@@ -90,7 +90,7 @@ static void multipath_end_request(struct bio *bio)
 
        if (!bio->bi_error)
                multipath_end_bh_io(mp_bh, 0);
-       else if (!(bio->bi_rw & REQ_RAHEAD)) {
+       else if (!(bio->bi_rwf & REQ_RAHEAD)) {
                /*
                 * oops, IO error:
                 */
@@ -111,7 +111,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
        struct multipath_bh * mp_bh;
        struct multipath_info *multipath;
 
-       if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+       if (unlikely(bio->bi_rwf & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
@@ -134,7 +134,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 
        mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
        mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-       mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+       mp_bh->bio.bi_rwf |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
        generic_make_request(&mp_bh->bio);
@@ -355,7 +355,7 @@ static void multipathd(struct md_thread *thread)
                        bio->bi_iter.bi_sector +=
                                conf->multipaths[mp_bh->path].rdev->data_offset;
                        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-                       bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+                       bio->bi_rwf |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
                        bio->bi_private = mp_bh;
                        generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3d439083212357e1b8a1f26f92f45546a121884..a5619a3960f3af34ca06c79c54d01ddba6401d6a 100644
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
        struct md_rdev *tmp_dev;
        struct bio *split;
 
-       if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+       if (unlikely(bio->bi_rwf & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4e6da4497553cde35149daa63caf74e4ecbe843b..766ddd4ed5ed120dbe55d5a8c025adddc8eae911 100644
@@ -1055,8 +1055,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
        unsigned long flags;
        const int op = bio_op(bio);
        const int rw = bio_data_dir(bio);
-       const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-       const unsigned long do_flush_fua = (bio->bi_rw &
+       const unsigned long do_sync = (bio->bi_rwf & REQ_SYNC);
+       const unsigned long do_flush_fua = (bio->bi_rwf &
                                                (REQ_PREFLUSH | REQ_FUA));
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
@@ -2321,7 +2321,7 @@ read_more:
                raid_end_bio_io(r1_bio);
        } else {
                const unsigned long do_sync
-                       = r1_bio->master_bio->bi_rw & REQ_SYNC;
+                       = r1_bio->master_bio->bi_rwf & REQ_SYNC;
                if (bio) {
                        r1_bio->bios[r1_bio->read_disk] =
                                mddev->ro ? IO_BLOCKED : NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 26ae74fd0d01319aca8c8fbea08dc7c7d3d48ebf..cdb0ef729711b43141feb382d5afb78f7ee29103 100644
@@ -1060,8 +1060,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        int i;
        const int op = bio_op(bio);
        const int rw = bio_data_dir(bio);
-       const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-       const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+       const unsigned long do_sync = (bio->bi_rwf & REQ_SYNC);
+       const unsigned long do_fua = (bio->bi_rwf & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
@@ -1446,7 +1446,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
        struct bio *split;
 
-       if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+       if (unlikely(bio->bi_rwf & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }
@@ -2530,7 +2530,7 @@ read_more:
                return;
        }
 
-       do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+       do_sync = (r10_bio->master_bio->bi_rwf & REQ_SYNC);
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 5504ce2bac06302712eedee6992f92b717def2dc..275271ee6013c8f01dc719173a0e5ac959502475 100644
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
                bio_endio(bio);
                return 0;
        }
-       bio->bi_rw &= ~REQ_PREFLUSH;
+       bio->bi_rwf &= ~REQ_PREFLUSH;
        return -EAGAIN;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 6953d78297b0ccf633abeb7591ed23fdb469f639..e7b801c59436b3cf74d6f68acf4f3644e6e7e44c 100644
@@ -806,7 +806,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
        dd_idx = 0;
        while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
                dd_idx++;
-       if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+       if (head->dev[dd_idx].towrite->bi_rwf != sh->dev[dd_idx].towrite->bi_rwf ||
            bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
                goto unlock_out;
 
@@ -1003,7 +1003,7 @@ again:
 
                        pr_debug("%s: for %llu schedule op %d on disc %d\n",
                                __func__, (unsigned long long)sh->sector,
-                               bi->bi_rw, i);
+                               bi->bi_rwf, i);
                        atomic_inc(&sh->count);
                        if (sh != head_sh)
                                atomic_inc(&head_sh->count);
@@ -1014,7 +1014,7 @@ again:
                                bi->bi_iter.bi_sector = (sh->sector
                                                 + rdev->data_offset);
                        if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
-                               bi->bi_rw |= REQ_NOMERGE;
+                               bi->bi_rwf |= REQ_NOMERGE;
 
                        if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
                                WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
@@ -1055,7 +1055,7 @@ again:
                        pr_debug("%s: for %llu schedule op %d on "
                                 "replacement disc %d\n",
                                __func__, (unsigned long long)sh->sector,
-                               rbi->bi_rw, i);
+                               rbi->bi_rwf, i);
                        atomic_inc(&sh->count);
                        if (sh != head_sh)
                                atomic_inc(&head_sh->count);
@@ -1088,7 +1088,7 @@ again:
                        if (op_is_write(op))
                                set_bit(STRIPE_DEGRADED, &sh->state);
                        pr_debug("skip op %d on disc %d for sector %llu\n",
-                               bi->bi_rw, i, (unsigned long long)sh->sector);
+                               bi->bi_rwf, i, (unsigned long long)sh->sector);
                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
                        set_bit(STRIPE_HANDLE, &sh->state);
                }
@@ -1619,9 +1619,9 @@ again:
 
                        while (wbi && wbi->bi_iter.bi_sector <
                                dev->sector + STRIPE_SECTORS) {
-                               if (wbi->bi_rw & REQ_FUA)
+                               if (wbi->bi_rwf & REQ_FUA)
                                        set_bit(R5_WantFUA, &dev->flags);
-                               if (wbi->bi_rw & REQ_SYNC)
+                               if (wbi->bi_rwf & REQ_SYNC)
                                        set_bit(R5_SyncIO, &dev->flags);
                                if (bio_op(wbi) == REQ_OP_DISCARD)
                                        set_bit(R5_Discard, &dev->flags);
@@ -5150,7 +5150,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
        DEFINE_WAIT(w);
        bool do_prepare;
 
-       if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
+       if (unlikely(bi->bi_rwf & REQ_PREFLUSH)) {
                int ret = r5l_handle_flush_request(conf->log, bi);
 
                if (ret == 0)
@@ -5233,7 +5233,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                        (unsigned long long)logical_sector);
 
                sh = raid5_get_active_stripe(conf, new_sector, previous,
-                                      (bi->bi_rw & REQ_RAHEAD), 0);
+                                      (bi->bi_rwf & REQ_RAHEAD), 0);
                if (sh) {
                        if (unlikely(previous)) {
                                /* expansion might have moved on while waiting for a
@@ -5301,7 +5301,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                        set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        if ((!sh->batch_head || sh == sh->batch_head) &&
-                           (bi->bi_rw & REQ_SYNC) &&
+                           (bi->bi_rwf & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
                        release_stripe_plug(mddev, sh);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 5d5cae05818da1a450ae7e1eafdc4db233ca8186..bcb27456c61953516151532f6a86d34493dd5e4f 100644
@@ -2945,7 +2945,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
                        printk(KERN_INFO
                               "submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
                               " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
-                              bio_op(bio), bio->bi_rw, bio->bi_vcnt,
+                              bio_op(bio), bio->bi_rwf, bio->bi_vcnt,
                               (unsigned long long)bio->bi_iter.bi_sector,
                               dev_bytenr, bio->bi_bdev);
 
@@ -2976,18 +2976,18 @@ static void __btrfsic_submit_bio(struct bio *bio)
                btrfsic_process_written_block(dev_state, dev_bytenr,
                                              mapped_datav, bio->bi_vcnt,
                                              bio, &bio_is_patched,
-                                             NULL, bio->bi_rw);
+                                             NULL, bio->bi_rwf);
                while (i > 0) {
                        i--;
                        kunmap(bio->bi_io_vec[i].bv_page);
                }
                kfree(mapped_datav);
-       } else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) {
+       } else if (NULL != dev_state && (bio->bi_rwf & REQ_PREFLUSH)) {
                if (dev_state->state->print_mask &
                    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
                        printk(KERN_INFO
                               "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
-                              bio_op(bio), bio->bi_rw, bio->bi_bdev);
+                              bio_op(bio), bio->bi_rwf, bio->bi_bdev);
                if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
                        if ((dev_state->state->print_mask &
                             (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -3005,7 +3005,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
                        block->never_written = 0;
                        block->iodone_w_error = 0;
                        block->flush_gen = dev_state->last_flush_gen + 1;
-                       block->submit_bio_bh_rw = bio->bi_rw;
+                       block->submit_bio_bh_rw = bio->bi_rwf;
                        block->orig_bio_bh_private = bio->bi_private;
                        block->orig_bio_bh_end_io.bio = bio->bi_end_io;
                        block->next_in_same_bio = NULL;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9a726ded2c6d150e73bd2719a09cb95a8634ab57..191304043fa3d0e014ff78bf7c5bdb57c62a0b1f 100644
@@ -870,7 +870,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
        atomic_inc(&fs_info->nr_async_submits);
 
-       if (bio->bi_rw & REQ_SYNC)
+       if (bio->bi_rwf & REQ_SYNC)
                btrfs_set_work_high_priority(&async->work);
 
        btrfs_queue_work(fs_info->workers, &async->work);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index df731c0ebec7964844121eafa45a12d7bec1408b..d6b48209c9882a039d9e659ea19a71624a4f6cdb 100644
@@ -8197,7 +8197,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
        if (err)
                btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
                           "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
-                          btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw,
+                          btrfs_ino(dip->inode), bio_op(bio), bio->bi_rwf,
                           (unsigned long long)bio->bi_iter.bi_sector,
                           bio->bi_iter.bi_size, err);
 
@@ -8361,7 +8361,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
        if (!bio)
                return -ENOMEM;
 
-       bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
+       bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rwf);
        bio->bi_private = dip;
        bio->bi_end_io = btrfs_end_dio_bio;
        btrfs_io_bio(bio)->logical = file_offset;
@@ -8399,7 +8399,7 @@ next_block:
                                                  start_sector, GFP_NOFS);
                        if (!bio)
                                goto out_err;
-                       bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
+                       bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rwf);
                        bio->bi_private = dip;
                        bio->bi_end_io = btrfs_end_dio_bio;
                        btrfs_io_bio(bio)->logical = file_offset;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0fb4a959012e0db9cff59828d6734d0cdfc761f5..075534c972079a2234af86dc149a3cf5cfcc8353 100644
@@ -5954,7 +5954,7 @@ static void btrfs_end_bio(struct bio *bio)
                                else
                                        btrfs_dev_stat_inc(dev,
                                                BTRFS_DEV_STAT_READ_ERRS);
-                               if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+                               if ((bio->bi_rwf & WRITE_FLUSH) == WRITE_FLUSH)
                                        btrfs_dev_stat_inc(dev,
                                                BTRFS_DEV_STAT_FLUSH_ERRS);
                                btrfs_dev_stat_print_on_error(dev);
@@ -6031,7 +6031,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
        bio->bi_next = NULL;
 
        spin_lock(&device->io_lock);
-       if (bio->bi_rw & REQ_SYNC)
+       if (bio->bi_rwf & REQ_SYNC)
                pending_bios = &device->pending_sync_bios;
        else
                pending_bios = &device->pending_bios;
@@ -6069,7 +6069,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
                rcu_read_lock();
                name = rcu_dereference(dev->name);
                pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
-                        "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw,
+                        "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rwf,
                         (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
                         name->str, dev->devid, bio->bi_iter.bi_size);
                rcu_read_unlock();
diff --git a/include/linux/bio.h b/include/linux/bio.h
index e09a8895fc31d1a348606a29dd06e46425270595..7006159e47a1e79f80ab4c33b8304a35f17770f3 100644
@@ -95,7 +95,7 @@ static inline bool bio_is_rw(struct bio *bio)
 
 static inline bool bio_mergeable(struct bio *bio)
 {
-       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+       if (bio->bi_rwf & REQ_NOMERGE_FLAGS)
                return false;
 
        return true;
@@ -318,7 +318,7 @@ struct bio_integrity_payload {
 
 static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
 {
-       if (bio->bi_rw & REQ_INTEGRITY)
+       if (bio->bi_rwf & REQ_INTEGRITY)
                return bio->bi_integrity;
 
        return NULL;
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index f77150a4a96aca88e2508e18a50973877a8cd0c4..c8402f3ce7291f4d3d3a0ed531a8a4142d71f157 100644
@@ -714,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
        if (!throtl) {
                blkg = blkg ?: q->root_blkg;
-               blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw,
+               blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rwf,
                                bio->bi_iter.bi_size);
-               blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);
+               blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rwf, 1);
        }
 
        rcu_read_unlock();
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index f254eb264924a5d17255cfe31f07f2b628781407..0097eb01ebc427abb7271de59985f30d30d5c991 100644
@@ -27,8 +27,9 @@ struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        int                     bi_error;
-       unsigned int            bi_rw;          /* bottom bits req flags,
-                                                * top bits REQ_OP
+       unsigned int            bi_rwf;         /* bottom bits req flags,
+                                                * top bits REQ_OP. Use
+                                                * accessors.
                                                 */
        unsigned short          bi_flags;       /* status, command, etc */
        unsigned short          bi_ioprio;
@@ -89,13 +90,13 @@ struct bio {
 };
 
 #define BIO_OP_SHIFT   (8 * sizeof(unsigned int) - REQ_OP_BITS)
-#define bio_op(bio)    ((bio)->bi_rw >> BIO_OP_SHIFT)
+#define bio_op(bio)    ((bio)->bi_rwf >> BIO_OP_SHIFT)
 
 #define bio_set_op_attrs(bio, op, op_flags) do {               \
        WARN_ON(op >= (1 << REQ_OP_BITS));                      \
-       (bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1);              \
-       (bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT);  \
-       (bio)->bi_rw |= op_flags;                               \
+       (bio)->bi_rwf &= ((1 << BIO_OP_SHIFT) - 1);             \
+       (bio)->bi_rwf |= ((unsigned int) (op) << BIO_OP_SHIFT); \
+       (bio)->bi_rwf |= op_flags;                              \
 } while (0)
 
 #define BIO_RESET_BYTES                offsetof(struct bio, bi_max_vecs)
@@ -138,7 +139,7 @@ struct bio {
 
 /*
  * Request flags.  For use in the cmd_flags field of struct request, and in
- * bi_rw of struct bio.  Note that some flags are only valid in either one.
+ * bi_rwf of struct bio.  Note that some flags are only valid in either one.
  */
 enum rq_flag_bits {
        /* common flags */
index 65673d8b81ac4fc30306c8e1a5bd70a05fbeb64b..819888377bed7a14c33c4f0dd938b625bb296bea 100644 (file)
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(bcache_request,
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->orig_sector    = bio->bi_iter.bi_sector - 16;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
        ),
 
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
        ),
 
@@ -138,7 +138,7 @@ TRACE_EVENT(bcache_read,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
                __entry->cache_hit = hit;
                __entry->bypass = bypass;
@@ -170,7 +170,7 @@ TRACE_EVENT(bcache_write,
                __entry->inode          = inode;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
                __entry->writeback = writeback;
                __entry->bypass = bypass;
index 5a2a7592068f2d3fc9d345c498f85183bc65053a..407e3d3ed5fc321a71c9d67a6efd42e889e4ef3b 100644 (file)
@@ -274,7 +274,7 @@ TRACE_EVENT(block_bio_bounce,
                                          bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -313,7 +313,7 @@ TRACE_EVENT(block_bio_complete,
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
        ),
 
@@ -341,7 +341,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -409,7 +409,7 @@ TRACE_EVENT(block_bio_queue,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -439,7 +439,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
                __entry->sector         = bio ? bio->bi_iter.bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0,
-                             bio ? bio->bi_rw : 0, __entry->nr_sector);
+                             bio ? bio->bi_rwf : 0, __entry->nr_sector);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
         ),
 
@@ -573,7 +573,7 @@ TRACE_EVENT(block_split,
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->new_sector     = new_sector;
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -617,7 +617,7 @@ TRACE_EVENT(block_bio_remap,
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
-               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+               blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rwf,
                              bio->bi_iter.bi_size);
        ),
 
index fb345cd11883e5406df22d8800f920974afeccc9..b99cbd3c3aa5392a2d5f9f420184e5b0d7f8f5c5 100644 (file)
@@ -776,7 +776,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                return;
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio_op(bio), bio->bi_rw, what, error, 0, NULL);
+                       bio_op(bio), bio->bi_rwf, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -881,7 +881,7 @@ static void blk_add_trace_split(void *ignore,
                __be64 rpdu = cpu_to_be64(pdu);
 
                __blk_add_trace(bt, bio->bi_iter.bi_sector,
-                               bio->bi_iter.bi_size, bio_op(bio), bio->bi_rw,
+                               bio->bi_iter.bi_size, bio_op(bio), bio->bi_rwf,
                                BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
                                &rpdu);
        }
@@ -915,7 +915,7 @@ static void blk_add_trace_bio_remap(void *ignore,
        r.sector_from = cpu_to_be64(from);
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio_op(bio), bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
+                       bio_op(bio), bio->bi_rwf, BLK_TA_REMAP, bio->bi_error,
                        sizeof(r), &r);
 }