Merge tag 'xfs-5.3-merge-12' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
diff --git a/block/blk-core.c b/block/blk-core.c
index 8340f69670d89625a8adc55a24e9c4e547555d7f..5d1fc8e17dd168a7e2723b2a90699491ccad06bd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -120,6 +120,42 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
+static const char *const blk_op_name[] = {
+       REQ_OP_NAME(READ),
+       REQ_OP_NAME(WRITE),
+       REQ_OP_NAME(FLUSH),
+       REQ_OP_NAME(DISCARD),
+       REQ_OP_NAME(SECURE_ERASE),
+       REQ_OP_NAME(ZONE_RESET),
+       REQ_OP_NAME(WRITE_SAME),
+       REQ_OP_NAME(WRITE_ZEROES),
+       REQ_OP_NAME(SCSI_IN),
+       REQ_OP_NAME(SCSI_OUT),
+       REQ_OP_NAME(DRV_IN),
+       REQ_OP_NAME(DRV_OUT),
+};
+#undef REQ_OP_NAME
+
+/**
+ * blk_op_str - Return the string XXX for a REQ_OP_XXX opcode.
+ * @op: REQ_OP_XXX.
+ *
+ * Description: Centralized block layer helper to convert a REQ_OP_XXX value
+ * into its string representation. Useful when debugging or tracing a bio or
+ * request. For an invalid REQ_OP_XXX it returns the string "UNKNOWN".
+ */
+inline const char *blk_op_str(unsigned int op)
+{
+       const char *op_str = "UNKNOWN";
+
+       if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
+               op_str = blk_op_name[op];
+
+       return op_str;
+}
+EXPORT_SYMBOL_GPL(blk_op_str);
+
 static const struct {
        int             errno;
        const char      *name;
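The blk_op_name[] table added above uses designated initializers keyed by the REQ_OP_* values, and blk_op_str() guards the lookup with a bounds check plus a NULL check for unassigned slots, falling back to "UNKNOWN". A minimal user-space sketch of the same pattern, using made-up opcode values rather than the kernel's real REQ_OP_* numbering:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative opcodes only; the real REQ_OP_* values live in
 * include/linux/blk_types.h and are not reproduced here. */
enum demo_op {
	DEMO_OP_READ	= 0,
	DEMO_OP_WRITE	= 1,
	DEMO_OP_FLUSH	= 2,
	DEMO_OP_DISCARD	= 3,
};

#define DEMO_OP_NAME(name) [DEMO_OP_##name] = #name
static const char *const demo_op_name[] = {
	DEMO_OP_NAME(READ),
	DEMO_OP_NAME(WRITE),
	DEMO_OP_NAME(FLUSH),
	DEMO_OP_NAME(DISCARD),
};
#undef DEMO_OP_NAME

/* Same shape as blk_op_str(): bounds check, NULL check for holes in the
 * table, "UNKNOWN" fallback for anything unrecognized. */
static const char *demo_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(demo_op_name) && demo_op_name[op])
		op_str = demo_op_name[op];

	return op_str;
}

int main(void)
{
	printf("%s %s %s\n", demo_op_str(0), demo_op_str(3), demo_op_str(42));
	return 0;
}

Compiled with any C compiler this prints "READ DISCARD UNKNOWN"; an opcode that falls inside the table but has no name assigned hits the NULL check and is also reported as "UNKNOWN".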
@@ -167,18 +203,23 @@ int blk_status_to_errno(blk_status_t status)
 }
 EXPORT_SYMBOL_GPL(blk_status_to_errno);
 
-static void print_req_error(struct request *req, blk_status_t status)
+static void print_req_error(struct request *req, blk_status_t status,
+               const char *caller)
 {
        int idx = (__force int)status;
 
        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
                return;
 
-       printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu flags %x\n",
-                               __func__, blk_errors[idx].name,
-                               req->rq_disk ?  req->rq_disk->disk_name : "?",
-                               (unsigned long long)blk_rq_pos(req),
-                               req->cmd_flags);
+       printk_ratelimited(KERN_ERR
+               "%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
+               "phys_seg %u prio class %u\n",
+               caller, blk_errors[idx].name,
+               req->rq_disk ? req->rq_disk->disk_name : "?",
+               blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
+               req->cmd_flags & ~REQ_OP_MASK,
+               req->nr_phys_segments,
+               IOPRIO_PRIO_CLASS(req->ioprio));
 }
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
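print_req_error() now takes the calling function's name as an argument (blk_update_request() passes __func__ below) instead of printing its own, and it decodes req->cmd_flags into two parts: the opcode, i.e. what req_op() extracts via REQ_OP_MASK, printed both numerically and through blk_op_str(), and the remaining request flags (cmd_flags & ~REQ_OP_MASK). A stand-alone sketch of that split, assuming an 8-bit opcode field purely for illustration (the real mask width is defined in include/linux/blk_types.h):

#include <stdio.h>

#define DEMO_OP_BITS	8				/* assumed width, for illustration */
#define DEMO_OP_MASK	((1u << DEMO_OP_BITS) - 1)	/* low bits carry the opcode */

int main(void)
{
	unsigned int cmd_flags = 0x801u;		/* hypothetical combined value */
	unsigned int op    = cmd_flags & DEMO_OP_MASK;	/* what req_op() would extract */
	unsigned int flags = cmd_flags & ~DEMO_OP_MASK;	/* what the new printk reports */

	printf("op 0x%x flags 0x%x\n", op, flags);	/* prints: op 0x1 flags 0x800 */
	return 0;
}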
@@ -550,15 +591,15 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
-bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
-                           struct bio *bio)
+bool bio_attempt_back_merge(struct request *req, struct bio *bio,
+               unsigned int nr_segs)
 {
        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
-       if (!ll_back_merge_fn(q, req, bio))
+       if (!ll_back_merge_fn(req, bio, nr_segs))
                return false;
 
-       trace_block_bio_backmerge(q, req, bio);
+       trace_block_bio_backmerge(req->q, req, bio);
 
        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
@@ -571,15 +612,15 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
        return true;
 }
 
-bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
-                            struct bio *bio)
+bool bio_attempt_front_merge(struct request *req, struct bio *bio,
+               unsigned int nr_segs)
 {
        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
-       if (!ll_front_merge_fn(q, req, bio))
+       if (!ll_front_merge_fn(req, bio, nr_segs))
                return false;
 
-       trace_block_bio_frontmerge(q, req, bio);
+       trace_block_bio_frontmerge(req->q, req, bio);
 
        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
@@ -621,6 +662,7 @@ no_merge:
  * blk_attempt_plug_merge - try to merge with %current's plugged list
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
+ * @nr_segs: number of segments in @bio
  * @same_queue_rq: pointer to &struct request that gets filled in when
  * another request associated with @q is found on the plug list
  * (optional, may be %NULL)
@@ -639,7 +681,7 @@ no_merge:
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                           struct request **same_queue_rq)
+               unsigned int nr_segs, struct request **same_queue_rq)
 {
        struct blk_plug *plug;
        struct request *rq;
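For reference, a sketch of how a bio submission path might call the updated signature; it assumes nr_segs was already computed when the bio was split, and it illustrates the calling convention rather than reproducing the actual blk_mq_make_request() code:

	struct request *same_queue_rq = NULL;
	bool merged = false;

	/* Per the kernel-doc above, only attempt this when merging is enabled. */
	if (!blk_queue_nomerges(q))
		merged = blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq);

	/* When merged is true the bio was folded into a request already sitting
	 * on current->plug, so no new request needs to be allocated for it. */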
@@ -668,10 +710,10 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
                switch (blk_try_merge(rq, bio)) {
                case ELEVATOR_BACK_MERGE:
-                       merged = bio_attempt_back_merge(q, rq, bio);
+                       merged = bio_attempt_back_merge(rq, bio, nr_segs);
                        break;
                case ELEVATOR_FRONT_MERGE:
-                       merged = bio_attempt_front_merge(q, rq, bio);
+                       merged = bio_attempt_front_merge(rq, bio, nr_segs);
                        break;
                case ELEVATOR_DISCARD_MERGE:
                        merged = bio_attempt_discard_merge(q, rq, bio);
@@ -687,18 +729,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
        return false;
 }
 
-void blk_init_request_from_bio(struct request *req, struct bio *bio)
-{
-       if (bio->bi_opf & REQ_RAHEAD)
-               req->cmd_flags |= REQ_FAILFAST_MASK;
-
-       req->__sector = bio->bi_iter.bi_sector;
-       req->ioprio = bio_prio(bio);
-       req->write_hint = bio->bi_write_hint;
-       blk_rq_bio_prep(req->q, req, bio);
-}
-EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
-
 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
 {
        char b[BDEVNAME_SIZE];
@@ -1163,7 +1193,7 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
         * Recalculate it to check the request correctly on this queue's
         * limitation.
         */
-       blk_recalc_rq_segments(rq);
+       rq->nr_phys_segments = blk_recalc_rq_segments(rq);
        if (rq->nr_phys_segments > queue_max_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
                        __func__, rq->nr_phys_segments, queue_max_segments(q));
@@ -1348,7 +1378,7 @@ EXPORT_SYMBOL_GPL(blk_steal_bios);
  *
  *     This special helper function is only for request stacking drivers
  *     (e.g. request-based dm) so that they can handle partial completion.
- *     Actual device drivers should use blk_end_request instead.
+ *     Actual device drivers should use blk_mq_end_request instead.
  *
  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
  *     %false return from this function.
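A sketch of the partial-completion pattern the comment above describes, as a request-stacking driver might use it; done_bytes is a hypothetical byte count reported by the lower device, and error handling is elided:

	if (blk_update_request(rq, BLK_STS_OK, done_bytes)) {
		/* true: bios remain on the request, so it stays pending
		 * until the rest of the data completes */
	} else {
		/* false: nothing left -- guaranteed when done_bytes was
		 * blk_rq_bytes(rq), as the kernel-doc above notes */
	}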
@@ -1373,7 +1403,7 @@ bool blk_update_request(struct request *req, blk_status_t error,
 
        if (unlikely(error && !blk_rq_is_passthrough(req) &&
                     !(req->rq_flags & RQF_QUIET)))
-               print_req_error(req, error);
+               print_req_error(req, error, __func__);
 
        blk_account_io_completion(req, nr_bytes);
 
@@ -1432,28 +1462,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
                }
 
                /* recalculate the number of segments */
-               blk_recalc_rq_segments(req);
+               req->nr_phys_segments = blk_recalc_rq_segments(req);
        }
 
        return true;
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
-                    struct bio *bio)
-{
-       if (bio_has_data(bio))
-               rq->nr_phys_segments = bio_phys_segments(q, bio);
-       else if (bio_op(bio) == REQ_OP_DISCARD)
-               rq->nr_phys_segments = 1;
-
-       rq->__data_len = bio->bi_iter.bi_size;
-       rq->bio = rq->biotail = bio;
-
-       if (bio->bi_disk)
-               rq->rq_disk = bio->bi_disk;
-}
-
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 /**
  * rq_flush_dcache_pages - Helper function to flush all pages in a request