block: better deal with the delayed not supported case in blk_cloned_rq_check_limits
author: Ritika Srivastava <ritika.srivastava@oracle.com>
Tue, 1 Sep 2020 20:17:31 +0000 (13:17 -0700)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 2 Sep 2020 01:38:33 +0000 (19:38 -0600)
If WRITE_ZERO/WRITE_SAME operation is not supported by the storage,
blk_cloned_rq_check_limits() will return IO error which will cause
device-mapper to fail the paths.

Instead, if the queue limit is set to 0, return BLK_STS_NOTSUPP.
BLK_STS_NOTSUPP will be ignored by device-mapper and will not fail the
paths.

Suggested-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Ritika Srivastava <ritika.srivastava@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c

index 3fbb5b2d5385da1a0fe01b7c63b6592e073ccab0..9f2a99abeeb9338c0e4c70809eca11f8154dbf83 100644 (file)
@@ -1148,10 +1148,24 @@ EXPORT_SYMBOL(submit_bio);
 static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
                                      struct request *rq)
 {
-       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
+       unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
+
+       if (blk_rq_sectors(rq) > max_sectors) {
+               /*
+                * SCSI device does not have a good way to return if
+                * Write Same/Zero is actually supported. If a device rejects
+                * a non-read/write command (discard, write same,etc.) the
+                * low-level device driver will set the relevant queue limit to
+                * 0 to prevent blk-lib from issuing more of the offending
+                * operations. Commands queued prior to the queue limit being
+                * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
+                * errors being propagated to upper layers.
+                */
+               if (max_sectors == 0)
+                       return BLK_STS_NOTSUPP;
+
                printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
-                       __func__, blk_rq_sectors(rq),
-                       blk_queue_get_max_sectors(q, req_op(rq)));
+                       __func__, blk_rq_sectors(rq), max_sectors);
                return BLK_STS_IOERR;
        }
 
@@ -1178,8 +1192,11 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-       if (blk_cloned_rq_check_limits(q, rq))
-               return BLK_STS_IOERR;
+       blk_status_t ret;
+
+       ret = blk_cloned_rq_check_limits(q, rq);
+       if (ret != BLK_STS_OK)
+               return ret;
 
        if (rq->rq_disk &&
            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))